diff --git a/go.mod b/go.mod
index 03038c5b01..c1edcef0a3 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/google/ko
 
 go 1.21
 
 require (
-	github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240323062759-1fd604ae58de
+	github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240514230400-03fa26f5508f
 	github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589
 	github.com/docker/docker v26.1.2+incompatible
 	github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936
@@ -15,6 +15,7 @@ require (
 	github.com/sigstore/cosign/v2 v2.2.3
 	github.com/spf13/cobra v1.8.0
 	github.com/spf13/viper v1.18.2
+	github.com/stretchr/testify v1.9.0
 	go.uber.org/automaxprocs v1.5.3
 	golang.org/x/sync v0.7.0
 	golang.org/x/tools v0.21.0
@@ -39,24 +40,25 @@ require (
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/alessio/shellescape v1.4.1 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/aws/aws-sdk-go-v2 v1.26.0 // indirect
-	github.com/aws/aws-sdk-go-v2/config v1.27.9 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.17.9 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 // indirect
+	github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect
+	github.com/aws/aws-sdk-go-v2/config v1.27.13 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.13 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ecr v1.27.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.20.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.28.5 // indirect
-	github.com/aws/smithy-go v1.20.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ecr v1.28.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.5 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.20.6 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.28.7 // indirect
+	github.com/aws/smithy-go v1.20.2 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/dimchansky/utfbom v1.1.1 // indirect
 	github.com/distribution/reference v0.5.0 // indirect
 	github.com/docker/cli v24.0.7+incompatible // indirect
@@ -100,6 +102,7 @@ require (
 	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/pelletier/go-toml/v2 v2.1.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sagikazarmark/locafero v0.4.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
diff --git a/go.sum b/go.sum
index e22627baeb..0e360f7919 100644
--- a/go.sum
+++ b/go.sum
@@ -37,38 +37,38 @@ github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVK
 github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA=
-github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I=
-github.com/aws/aws-sdk-go-v2/config v1.27.9 h1:gRx/NwpNEFSk+yQlgmk1bmxxvQ5TyJ76CWXs9XScTqg=
-github.com/aws/aws-sdk-go-v2/config v1.27.9/go.mod h1:dK1FQfpwpql83kbD873E9vz4FyAxuJtR22wzoXn3qq0=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.9 h1:N8s0/7yW+h8qR8WaRlPQeJ6czVMNQVNtNdUqf6cItao=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.9/go.mod h1:446YhIdmSV0Jf/SLafGZalQo+xr2iw7/fzXGDPTU1yQ=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0 h1:af5YzcLf80tv4Em4jWVD75lpnOHSBkPUZxZfGkrI3HI=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0/go.mod h1:nQ3how7DMnFMWiU1SpECohgC82fpn4cKZ875NDMmwtA=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 h1:0ScVK/4qZ8CIW0k8jOeFVsyS/sAiXpYxRBLolMkuLQM=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4/go.mod h1:84KyjNZdHC6QZW08nfHI6yZgPd+qRgaWcYsyLUo3QY8=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 h1:sHmMWWX5E7guWEFQ9SVo6A3S4xpPrWnd77a6y4WM6PU=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4/go.mod h1:WjpDrhWisWOIoS9n3nk67A3Ll1vfULJ9Kq6h29HTD48=
+github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
+github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
+github.com/aws/aws-sdk-go-v2/config v1.27.13 h1:WbKW8hOzrWoOA/+35S5okqO/2Ap8hkkFUzoW8Hzq24A=
+github.com/aws/aws-sdk-go-v2/config v1.27.13/go.mod h1:XLiyiTMnguytjRER7u5RIkhIqS8Nyz41SwAWb4xEjxs=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.13 h1:XDCJDzk/u5cN7Aple7D/MiAhx1Rjo/0nueJ0La8mRuE=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.13/go.mod h1:FMNcjQrmuBYvOTZDtOLCIu0esmxjF7RuA/89iSXWzQI=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.27.3 h1:gfgt0D8MGL3gHrJPEv4rcWptA4Nz7uYn25ls8lLiANw=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.27.3/go.mod h1:O5Fvd41s5KfDG093xLM7FhGiH6EmhmEli5D5MQH3TWw=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.3 h1:gaq/4fd2/bQeJ33m4csgL7DJHrrmvGhqnrsxchNr46c=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.3/go.mod h1:vn+Rz9fAFGJtDXbBmYdTc71Q8iF/W/uK1/ec93hinD8=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 h1:b+E7zIUHMmcB4Dckjpkapoy47W6C9QBv/zoUP+Hn8Kc=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6/go.mod h1:S2fNV0rxrP78NhPbCZeQgY8H9jdDMeGtwcfZIRxzBqU=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.3 h1:mnbuWHOcM70/OFUlZZ5rcdfA8PflGXXiefU/O+1S3+8=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.3/go.mod h1:5HFu51Elk+4oRBZVxmHrSds5jFXmFj8C3w7DVF2gnrs=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3 h1:uLq0BKatTmDzWa/Nu4WO0M1AaQDaPpwTKAeByEc6WFM=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3/go.mod h1:b+qdhjnxj8GSR6t5YfphOffeoQSQ1KmpoVVuBn+PWxs=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.5 h1:J/PpTf/hllOjx8Xu9DMflff3FajfLxqM5+tepvVXmxg=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.5/go.mod h1:0ih0Z83YDH/QeQ6Ori2yGE2XvWYv/Xm+cZc01LC6oK0=
-github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw=
-github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
-github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240323062759-1fd604ae58de h1:GpfVZg7GMoefvIxKI+BC+4Fap8eCAAQ7a6Ww2l9HRnM=
-github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240323062759-1fd604ae58de/go.mod h1:pJQomIo4A5X9k8E30Q4A2BAJckIwQhC1TAFN/WXCkdk=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.28.0 h1:rdPrcOZmqT2F+yzmKEImrx5XUs7Hpf4V9Rp6E8mhsxQ=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.28.0/go.mod h1:if7ybzzjOmDB8pat9FE35AHTY6ZxlYSy3YviSmFZv8c=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.5 h1:452e/nFuqPvwPg+1OD2CG/v29R9MH8egJSJKh2Qduv8=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.5/go.mod h1:8pvvNAklmq+hKmqyvFoMRg0bwg9sdGOvdwximmKiKP0=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.6 h1:o5cTaeunSpfXiLTIBx5xo2enQmiChtu1IBbzXnfU9Hs=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.6/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0 h1:Qe0r0lVURDDeBQJ4yP+BOrJkvkiCo/3FH/t+wY11dmw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.7 h1:et3Ta53gotFR4ERLXXHIHl/Uuk1qYpP5uU7cvNql8ns=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.7/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw=
+github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
+github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240514230400-03fa26f5508f h1:Z0kS9pJDQgCg3u2lH6+CdYaFbyQtyukVTiUCG6re0E4=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240514230400-03fa26f5508f/go.mod h1:rAE739ssmE5O5fLuQ2y8uHdmOJaelE5I0Es3SxV0y1A=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
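
Note (not part of the patch): testify is added as a direct requirement, and go-spew and go-difflib appear above as new indirect entries because testify depends on them to render assertion failures. A minimal sketch of the dependency in action, with illustrative names:

```go
package deps_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// require dumps values with go-spew and diffs them with go-difflib
// when an assertion fails, which is why both modules now show up as
// // indirect dependencies in go.mod.
func TestRequireRendersDiffs(t *testing.T) {
	want := map[string]string{"FOO": "bar"}
	got := map[string]string{"FOO": "bar"}
	require.Equal(t, want, got) // on mismatch, the failure message includes a diff
}
```
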
Saw $RESULT" && exit 1 else echo "Test PASSED" fi -echo "4. On inside the module with vendor should output Hello there." -RESULT="$(GO111MODULE=on GOFLAGS="-mod=vendor" ./ko build --local "$GOPATH/src/github.com/google/ko/test" | grep "$FILTER" | xargs -I% docker run %)" -if [[ "$RESULT" != *"Hello there"* ]]; then - echo "Test FAILED. Saw $RESULT" && exit 1 -else - echo "Test PASSED" -fi - -echo "5. On inside the module without vendor should output Hello there" -RESULT="$(GO111MODULE=on GOFLAGS="" ./ko build --local "$GOPATH/src/github.com/google/ko/test" | grep "$FILTER" | xargs -I% docker run %)" -if [[ "$RESULT" != *"Hello there"* ]]; then - echo "Test FAILED. Saw $RESULT" && exit 1 -else - echo "Test PASSED" -fi - -echo "6. On outside the module should fail." -pushd .. || exit 1 -GO111MODULE=on ./ko/ko build --local "$GOPATH/src/github.com/google/ko/test" && exit 1 -popd || exit 1 - -echo "7. On outside with build config specifying the test module builds." -pushd test/build-configs || exit 1 -for app in foo bar ; do - # test both local and fully qualified import paths - for prefix in example.com . ; do - import_path=$prefix/$app/cmd - RESULT="$(GO111MODULE=on GOFLAGS="" ../../ko build --local $import_path | grep "$FILTER" | xargs -I% docker run %)" - if [[ "$RESULT" != *"$app"* ]]; then - echo "Test FAILED for $import_path. Saw $RESULT but expected $app" && exit 1 - else - echo "Test PASSED for $import_path" - fi - done -done -popd || exit 1 - -echo "9. Linux capabilities." +echo "2. Linux capabilities." pushd test/build-configs || exit 1 # run as non-root user with net_bind_service cap granted docker_run_opts="--user 1 --cap-add=net_bind_service" -RESULT="$(GO111MODULE=on GOFLAGS="" ../../ko build --local ./caps/cmd | grep "$FILTER" | xargs -I% docker run $docker_run_opts %)" +RESULT="$(../../ko build --local --platform="linux/$GOARCH" ./caps/cmd | grep "$FILTER" | xargs -I% docker run $docker_run_opts %)" if [[ "$RESULT" != "No capabilities" ]]; then echo "Test FAILED. Saw '$RESULT' but expected 'No capabilities'. Docker 'cap-add' must have no effect unless matching capabilities are granted to the file." && exit 1 fi # build with a different config requesting net_bind_service file capability -RESULT_WITH_FILE_CAPS="$(KO_CONFIG_PATH=caps.ko.yaml GO111MODULE=on GOFLAGS="" ../../ko build --local ./caps/cmd | grep "$FILTER" | xargs -I% docker run $docker_run_opts %)" +RESULT_WITH_FILE_CAPS="$(KO_CONFIG_PATH=caps.ko.yaml ../../ko build --local --platform="linux/$GOARCH" ./caps/cmd | grep "$FILTER" | xargs -I% docker run $docker_run_opts %)" if [[ "$RESULT_WITH_FILE_CAPS" != "Has capabilities"* ]]; then echo "Test FAILED. Saw '$RESULT_WITH_FILE_CAPS' but expected 'Has capabilities'. Docker 'cap-add' must work when matching capabilities are granted to the file." 
&& exit 1 else diff --git a/pkg/build/gobuild_test.go b/pkg/build/gobuild_test.go index 8764386608..32b1d2ec3b 100644 --- a/pkg/build/gobuild_test.go +++ b/pkg/build/gobuild_test.go @@ -40,6 +40,7 @@ import ( "github.com/google/ko/pkg/internal/gittesting" specsv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sigstore/cosign/v2/pkg/oci" + "github.com/stretchr/testify/require" ) func repoRootDir() (string, error) { @@ -317,63 +318,49 @@ func TestBuildEnv(t *testing.T) { func TestCreateTemplateData(t *testing.T) { t.Run("env", func(t *testing.T) { t.Setenv("FOO", "bar") - params := createTemplateData(context.TODO(), buildContext{}) + params := createTemplateData(context.TODO(), buildContext{dir: t.TempDir()}) vars := params["Env"].(map[string]string) - if vars["FOO"] != "bar" { - t.Fatalf("vars[FOO]=%q, want %q", vars["FOO"], "bar") - } + require.Equal(t, "bar", vars["FOO"]) }) t.Run("empty creation time", func(t *testing.T) { - params := createTemplateData(context.TODO(), buildContext{}) + params := createTemplateData(context.TODO(), buildContext{dir: t.TempDir()}) // Make sure the date was set to time.Now(). actualDateStr := params["Date"].(string) actualDate, err := time.Parse(time.RFC3339, actualDateStr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if time.Since(actualDate) > time.Minute { t.Fatalf("expected date to be now, but was %v", actualDate) } // Check the timestamp. actualTimestampSec := params["Timestamp"].(int64) - actualTimestamp := time.Unix(actualTimestampSec, 0) - expectedTimestamp := actualDate.Truncate(time.Second) - if !actualTimestamp.Equal(expectedTimestamp) { - t.Fatalf("expected timestamp %v, but was %v", - expectedTimestamp, actualTimestamp) - } + actualTimestamp := time.Unix(actualTimestampSec, 0).UTC() + expectedTimestamp := actualDate.Truncate(time.Second).UTC() + require.Equal(t, expectedTimestamp, actualTimestamp) }) t.Run("creation time", func(t *testing.T) { // Create a reference time for use as a creation time. - expectedTime, err := time.Parse(time.RFC3339, "2012-11-01T22:08:41+00:00") - if err != nil { - t.Fatal(err) - } + expectedTime, err := time.Parse(time.RFC3339, "2012-11-01T22:08:00Z") + require.NoError(t, err) params := createTemplateData(context.TODO(), buildContext{ creationTime: v1.Time{Time: expectedTime}, + dir: t.TempDir(), }) // Check the date. actualDateStr := params["Date"].(string) actualDate, err := time.Parse(time.RFC3339, actualDateStr) - if err != nil { - t.Fatal(err) - } - if !actualDate.Equal(expectedTime) { - t.Fatalf("expected date to be %v, but was %v", expectedTime, actualDate) - } + require.NoError(t, err) + require.Equal(t, expectedTime, actualDate) // Check the timestamp. 
diff --git a/pkg/internal/git/info_test.go b/pkg/internal/git/info_test.go
index 767116d0dd..702130b3ca 100644
--- a/pkg/internal/git/info_test.go
+++ b/pkg/internal/git/info_test.go
@@ -38,15 +38,13 @@
 
 import (
 	"context"
-	"errors"
 	"os"
 	"path/filepath"
-	"strings"
 	"testing"
 
-	"github.com/google/go-cmp/cmp"
 	"github.com/google/ko/pkg/internal/git"
 	"github.com/google/ko/pkg/internal/gittesting"
+	"github.com/stretchr/testify/require"
 )
 
 const fakeGitURL = "git@github.com:foo/bar.git"
@@ -54,7 +52,7 @@
 func TestNotAGitFolder(t *testing.T) {
 	dir := t.TempDir()
 	i, err := git.GetInfo(context.TODO(), dir)
-	requireErrorIs(t, err, git.ErrNotRepository)
+	require.ErrorIs(t, err, git.ErrNotRepository)
 
 	tpl := i.TemplateValue()
 	requireEmpty(t, tpl)
@@ -67,18 +65,18 @@
 	gittesting.GitCommit(t, dir, "commit1")
 	gittesting.GitTag(t, dir, "v0.0.1")
 	i, err := git.GetInfo(context.TODO(), dir)
-	requireNoError(t, err)
+	require.NoError(t, err)
 
 	tpl := i.TemplateValue()
-	requireEqual(t, "main", tpl["Branch"])
-	requireEqual(t, "v0.0.1", tpl["Tag"])
-	requireNotEmpty(t, tpl["ShortCommit"].(string))
-	requireNotEmpty(t, tpl["FullCommit"].(string))
-	requireNotEmpty(t, tpl["CommitDate"].(string))
-	requireNotZero(t, tpl["CommitTimestamp"].(int64))
-	requireFalse(t, tpl["IsDirty"].(bool))
-	requireTrue(t, tpl["IsClean"].(bool))
-	requireEqual(t, "clean", tpl["TreeState"])
+	require.Equal(t, "main", tpl["Branch"])
+	require.Equal(t, "v0.0.1", tpl["Tag"])
+	require.NotEmpty(t, tpl["ShortCommit"].(string))
+	require.NotEmpty(t, tpl["FullCommit"].(string))
+	require.NotEmpty(t, tpl["CommitDate"].(string))
+	require.NotZero(t, tpl["CommitTimestamp"].(int64))
+	require.False(t, tpl["IsDirty"].(bool))
+	require.True(t, tpl["IsClean"].(bool))
+	require.Equal(t, "clean", tpl["TreeState"])
 }
 
 func TestBranch(t *testing.T) {
@@ -89,18 +87,18 @@
 	gittesting.GitTag(t, dir, "test-branch-tag")
 	gittesting.GitCheckoutBranch(t, dir, "test-branch")
 	i, err := git.GetInfo(context.TODO(), dir)
-	requireNoError(t, err)
+	require.NoError(t, err)
 
 	tpl := i.TemplateValue()
-	requireEqual(t, "test-branch", tpl["Branch"])
-	requireEqual(t, "test-branch-tag", tpl["Tag"])
-	requireNotEmpty(t, tpl["ShortCommit"].(string))
-	requireNotEmpty(t, tpl["FullCommit"].(string))
-	requireNotEmpty(t, tpl["CommitDate"].(string))
-	requireNotZero(t, tpl["CommitTimestamp"].(int64))
-	requireFalse(t, tpl["IsDirty"].(bool))
-	requireTrue(t, tpl["IsClean"].(bool))
-	requireEqual(t, "clean", tpl["TreeState"])
+	require.Equal(t, "test-branch", tpl["Branch"])
+	require.Equal(t, "test-branch-tag", tpl["Tag"])
+	require.NotEmpty(t, tpl["ShortCommit"].(string))
+	require.NotEmpty(t, tpl["FullCommit"].(string))
+	require.NotEmpty(t, tpl["CommitDate"].(string))
+	require.NotZero(t, tpl["CommitTimestamp"].(int64))
+	require.False(t, tpl["IsDirty"].(bool))
+	require.True(t, tpl["IsClean"].(bool))
+	require.Equal(t, "clean", tpl["TreeState"])
 }
 
 func TestNewRepository(t *testing.T) {
@@ -108,7 +106,7 @@
 	dir := t.TempDir()
 	gittesting.GitInit(t, dir)
 	i, err := git.GetInfo(context.TODO(), dir)
 	// TODO: improve this error handling
-	requireErrorContains(t, err, `fatal: ambiguous argument 'HEAD'`)
+	require.ErrorContains(t, err, `fatal: ambiguous argument 'HEAD'`)
 
 	tpl := i.TemplateValue()
 	requireEmpty(t, tpl)
@@ -120,18 +118,18 @@
 	gittesting.GitRemoteAdd(t, dir, fakeGitURL)
 	gittesting.GitCommit(t, dir, "first")
 	i, err := git.GetInfo(context.TODO(), dir)
-	requireErrorIs(t, err, git.ErrNoTag)
+	require.ErrorIs(t, err, git.ErrNoTag)
 
 	tpl := i.TemplateValue()
-	requireEqual(t, "main", tpl["Branch"])
-	requireEqual(t, "v0.0.0", tpl["Tag"])
-	requireNotEmpty(t, tpl["ShortCommit"].(string))
-	requireNotEmpty(t, tpl["FullCommit"].(string))
-	requireNotEmpty(t, tpl["CommitDate"].(string))
-	requireNotZero(t, tpl["CommitTimestamp"].(int64))
-	requireFalse(t, tpl["IsDirty"].(bool))
-	requireTrue(t, tpl["IsClean"].(bool))
-	requireEqual(t, "clean", tpl["TreeState"])
+	require.Equal(t, "main", tpl["Branch"])
+	require.Equal(t, "v0.0.0", tpl["Tag"])
+	require.NotEmpty(t, tpl["ShortCommit"].(string))
+	require.NotEmpty(t, tpl["FullCommit"].(string))
+	require.NotEmpty(t, tpl["CommitDate"].(string))
+	require.NotZero(t, tpl["CommitTimestamp"].(int64))
+	require.False(t, tpl["IsDirty"].(bool))
+	require.True(t, tpl["IsClean"].(bool))
+	require.Equal(t, "clean", tpl["TreeState"])
 }
 
 func TestDirty(t *testing.T) {
@@ -139,25 +137,25 @@
 	gittesting.GitInit(t, dir)
 	gittesting.GitRemoteAdd(t, dir, fakeGitURL)
 	testFile, err := os.Create(filepath.Join(dir, "testFile"))
-	requireNoError(t, err)
-	requireNoError(t, testFile.Close())
+	require.NoError(t, err)
+	require.NoError(t, testFile.Close())
 	gittesting.GitAdd(t, dir)
 	gittesting.GitCommit(t, dir, "commit2")
 	gittesting.GitTag(t, dir, "v0.0.1")
-	requireNoError(t, os.WriteFile(testFile.Name(), []byte("lorem ipsum"), 0o644))
+	require.NoError(t, os.WriteFile(testFile.Name(), []byte("lorem ipsum"), 0o644))
 	i, err := git.GetInfo(context.TODO(), dir)
-	requireErrorContains(t, err, "git is in a dirty state")
+	require.ErrorContains(t, err, "git is in a dirty state")
 
 	tpl := i.TemplateValue()
-	requireEqual(t, "main", tpl["Branch"])
-	requireEqual(t, "v0.0.1", tpl["Tag"])
-	requireNotEmpty(t, tpl["ShortCommit"].(string))
-	requireNotEmpty(t, tpl["FullCommit"].(string))
-	requireNotEmpty(t, tpl["CommitDate"].(string))
-	requireNotZero(t, tpl["CommitTimestamp"].(int64))
-	requireTrue(t, tpl["IsDirty"].(bool))
-	requireFalse(t, tpl["IsClean"].(bool))
-	requireEqual(t, "dirty", tpl["TreeState"])
+	require.Equal(t, "main", tpl["Branch"])
+	require.Equal(t, "v0.0.1", tpl["Tag"])
+	require.NotEmpty(t, tpl["ShortCommit"].(string))
+	require.NotEmpty(t, tpl["FullCommit"].(string))
+	require.NotEmpty(t, tpl["CommitDate"].(string))
+	require.NotZero(t, tpl["CommitTimestamp"].(int64))
+	require.True(t, tpl["IsDirty"].(bool))
+	require.False(t, tpl["IsClean"].(bool))
+	require.Equal(t, "dirty", tpl["TreeState"])
 }
 
 func TestValidState(t *testing.T) {
@@ -170,86 +168,26 @@
 	gittesting.GitCommit(t, dir, "commit4")
 	gittesting.GitTag(t, dir, "v0.0.3")
 	i, err := git.GetInfo(context.TODO(), dir)
-	requireNoError(t, err)
-	requireEqual(t, "v0.0.3", i.Tag)
-	requireFalse(t, i.Dirty)
+	require.NoError(t, err)
+	require.Equal(t, "v0.0.3", i.Tag)
+	require.False(t, i.Dirty)
 }
 
 func TestGitNotInPath(t *testing.T) {
 	t.Setenv("PATH", "")
 	i, err := git.GetInfo(context.TODO(), "")
-	requireErrorIs(t, err, git.ErrNoGit)
+	require.ErrorIs(t, err, git.ErrNoGit)
 
 	tpl := i.TemplateValue()
 	requireEmpty(t, tpl)
 }
 
 func requireEmpty(t *testing.T, tpl map[string]interface{}) {
-	requireEqual(t, "", tpl["Branch"])
-	requireEqual(t, "", tpl["Tag"])
-	requireEqual(t, "", tpl["ShortCommit"])
-	requireEqual(t, "", tpl["FullCommit"])
-	requireFalse(t, tpl["IsDirty"].(bool))
-	requireTrue(t, tpl["IsClean"].(bool))
-	requireEqual(t, "clean", tpl["TreeState"])
-}
-
-func requireEqual(t *testing.T, expected any, actual any) {
-	t.Helper()
-	if diff := cmp.Diff(expected, actual); diff != "" {
-		t.Fatalf("%T differ (-got, +want): %s", expected, diff)
-	}
-}
-
-func requireTrue(t *testing.T, val bool) {
-	t.Helper()
-	requireEqual(t, true, val)
-}
-
-func requireFalse(t *testing.T, val bool) {
-	t.Helper()
-	requireEqual(t, false, val)
-}
-
-func requireNoError(t *testing.T, err error) {
-	t.Helper()
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-}
-
-func requireError(t *testing.T, err error) {
-	t.Helper()
-	if err == nil {
-		t.Fatal("expected error")
-	}
-}
-
-func requireErrorIs(t *testing.T, err error, target error) {
-	t.Helper()
-	if !errors.Is(err, target) {
-		t.Fatalf("expected error to be %v, got %v", target, err)
-	}
-}
-
-func requireErrorContains(t *testing.T, err error, target string) {
-	t.Helper()
-	requireError(t, err)
-	if !strings.Contains(err.Error(), target) {
-		t.Fatalf("expected error to contain %q, got %q", target, err)
-	}
-}
-
-func requireNotEmpty(t *testing.T, val string) {
-	t.Helper()
-	if len(val) == 0 {
-		t.Fatalf("value should not be empty")
-	}
-}
-
-func requireNotZero(t *testing.T, val int64) {
-	t.Helper()
-	if val == 0 {
-		t.Fatalf("value should not be zero")
-	}
+	require.Equal(t, "", tpl["Branch"])
+	require.Equal(t, "", tpl["Tag"])
+	require.Equal(t, "", tpl["ShortCommit"])
+	require.Equal(t, "", tpl["FullCommit"])
+	require.False(t, tpl["IsDirty"].(bool))
+	require.True(t, tpl["IsClean"].(bool))
+	require.Equal(t, "clean", tpl["TreeState"])
 }
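
A usage sketch of the API these tests cover, mirroring TestNotAGitFolder above: GetInfo reports a sentinel error outside a repository but still returns an info value whose TemplateValue map is fully populated with defaults, so templates never hit missing keys. Test name and values here are illustrative.

```go
package git_test

import (
	"context"
	"testing"

	"github.com/google/ko/pkg/internal/git"
	"github.com/stretchr/testify/require"
)

func TestGetInfoDefaultsOutsideRepo(t *testing.T) {
	i, err := git.GetInfo(context.TODO(), t.TempDir())
	require.ErrorIs(t, err, git.ErrNotRepository)

	// Even on error, callers can rely on every template key existing.
	tpl := i.TemplateValue()
	require.Equal(t, "", tpl["FullCommit"])
	require.Equal(t, "clean", tpl["TreeState"])
}
```
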
diff --git a/pkg/internal/gittesting/git.go b/pkg/internal/gittesting/git.go
index d2397d1692..9a9dd915a2 100644
--- a/pkg/internal/gittesting/git.go
+++ b/pkg/internal/gittesting/git.go
@@ -40,17 +40,18 @@ import (
 	"bytes"
 	"errors"
 	"os/exec"
-	"strings"
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )
 
 // GitInit inits a new git project.
 func GitInit(t *testing.T, dir string) {
 	t.Helper()
 	out, err := fakeGit(dir, "init")
-	requireNoError(t, err)
-	requireContains(t, out, "Initialized empty Git repository", "")
-	requireNoError(t, err)
+	require.NoError(t, err)
+	require.Contains(t, out, "Initialized empty Git repository", "")
+	require.NoError(t, err)
 	GitCheckoutBranch(t, dir, "main")
 	_, _ = fakeGit("branch", "-D", "master")
 }
@@ -59,40 +60,32 @@ func GitInit(t *testing.T, dir string) {
 func GitRemoteAdd(t *testing.T, dir, url string) {
 	t.Helper()
 	out, err := fakeGit(dir, "remote", "add", "origin", url)
-	requireNoError(t, err)
-	requireEmpty(t, out)
+	require.NoError(t, err)
+	require.Empty(t, out)
 }
 
 // GitCommit creates a git commit.
 func GitCommit(t *testing.T, dir, msg string) {
 	t.Helper()
 	out, err := fakeGit(dir, "commit", "--allow-empty", "-m", msg)
-	requireNoError(t, err)
-	requireContains(t, out, "main", msg)
+	require.NoError(t, err)
+	require.Contains(t, out, "main", msg)
 }
 
 // GitTag creates a git tag.
 func GitTag(t *testing.T, dir, tag string) {
 	t.Helper()
 	out, err := fakeGit(dir, "tag", tag)
-	requireNoError(t, err)
-	requireEmpty(t, out)
-}
-
-// GitAnnotatedTag creates an annotated tag.
-func GitAnnotatedTag(t *testing.T, dir, tag, message string) {
-	t.Helper()
-	out, err := fakeGit(dir, "tag", "-a", tag, "-m", message)
-	requireNoError(t, err)
-	requireEmpty(t, out)
+	require.NoError(t, err)
+	require.Empty(t, out)
 }
 
 // GitAdd adds all files to stage.
 func GitAdd(t *testing.T, dir string) {
 	t.Helper()
 	out, err := fakeGit(dir, "add", "-A")
-	requireNoError(t, err)
-	requireEmpty(t, out)
+	require.NoError(t, err)
+	require.Empty(t, out)
 }
 
 func fakeGit(dir string, args ...string) (string, error) {
@@ -111,8 +104,8 @@ func fakeGit(dir string, args ...string) (string, error) {
 func GitCheckoutBranch(t *testing.T, dir, name string) {
 	t.Helper()
 	out, err := fakeGit(dir, "checkout", "-b", name)
-	requireNoError(t, err)
-	requireEmpty(t, out)
+	require.NoError(t, err)
+	require.Empty(t, out)
 }
 
 func gitRun(dir string, args ...string) (string, error) {
@@ -133,28 +126,3 @@ func gitRun(dir string, args ...string) (string, error) {
 
 	return stdout.String(), nil
 }
-
-func requireNoError(t *testing.T, err error) {
-	t.Helper()
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-}
-
-func requireContains(t *testing.T, val, expected, msg string) {
-	t.Helper()
-	if !strings.Contains(val, expected) {
-		if len(msg) > 0 {
-			t.Fatalf("%s: expected value %s missing from value %s", msg, expected, val)
-		} else {
-			t.Fatalf("expected value %s missing from value %s", expected, val)
-		}
-	}
-}
-
-func requireEmpty(t *testing.T, val string) {
-	t.Helper()
-	if len(val) > 0 {
-		t.Fatalf("%s: expected empty string", val)
-	}
-}
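
One pre-existing wart this hunk leaves untouched: in GitInit, the context line `_, _ = fakeGit("branch", "-D", "master")` passes "branch" as fakeGit's dir argument rather than as a git argument, so the cleanup runs against the wrong directory (its result is already deliberately ignored). A sketch of how that call would read with the directory passed first, assuming fakeGit's signature as shown above; this is not part of the patch:

```go
// GitInit with the branch cleanup fixed to match
// fakeGit(dir string, args ...string); errors from the deletion are
// still ignored because the "master" branch may not exist.
func GitInit(t *testing.T, dir string) {
	t.Helper()
	out, err := fakeGit(dir, "init")
	require.NoError(t, err)
	require.Contains(t, out, "Initialized empty Git repository")
	GitCheckoutBranch(t, dir, "main")
	_, _ = fakeGit(dir, "branch", "-D", "master")
}
```
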
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go deleted file mode 100644 index 540ad16ac4..0000000000 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -// Version is the current tagged release of the library. -const Version = "1.23.3" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md deleted file mode 100644 index 06b957349a..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ /dev/null @@ -1,19 +0,0 @@ -# Changes - -## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) - - -### Bug Fixes - -* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) - -## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) - - -### Bug Fixes - -* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) - -## [0.1.0] (2022-10-26) - -Initial release of metadata being it's own module. diff --git a/vendor/cloud.google.com/go/compute/metadata/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md deleted file mode 100644 index f940fb2c85..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# Compute API - -[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) - -This is a utility library for communicating with Google Cloud metadata service -on Google Cloud. - -## Install - -```bash -go get cloud.google.com/go/compute/metadata -``` - -## Go Version Support - -See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) -section in the root directory's README. - -## Contributing - -Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) -document for details. - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. See -[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) -for more information. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go deleted file mode 100644 index c17faa142a..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ /dev/null @@ -1,543 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metadata provides access to Google Compute Engine (GCE) -// metadata and API service accounts. -// -// This package is a wrapper around the GCE metadata service, -// as documented at https://cloud.google.com/compute/docs/metadata/overview. -package metadata // import "cloud.google.com/go/compute/metadata" - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strings" - "sync" - "time" -) - -const ( - // metadataIP is the documented metadata server IP address. - metadataIP = "169.254.169.254" - - // metadataHostEnv is the environment variable specifying the - // GCE metadata hostname. If empty, the default value of - // metadataIP ("169.254.169.254") is used instead. - // This variable name is not defined by any spec, as far as - // I know; it was made up for the Go package.
- metadataHostEnv = "GCE_METADATA_HOST" - - userAgent = "gcloud-golang/0.1" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var defaultClient = &Client{hc: newDefaultHTTPClient()} - -func newDefaultHTTPClient() *http.Client { - return &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - IdleConnTimeout: 60 * time.Second, - }, - Timeout: 5 * time.Second, - } -} - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". -// -// This error is not returned if the value is defined to be the empty -// string. -type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -func (c *cachedValue) get(cl *Client) (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = cl.getTrimmed(c.k) - } else { - v, err = cl.Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var ( - onGCEOnce sync.Once - onGCE bool -) - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - onGCEOnce.Do(initOnGCE) - return onGCE -} - -func initOnGCE() { - onGCE = testOnGCE() -} - -func testOnGCE() bool { - // The user explicitly said they're on GCE, so trust them. - if os.Getenv(metadataHostEnv) != "" { - return true - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resc := make(chan bool, 2) - - // Try two strategies in parallel. - // See https://github.com/googleapis/google-cloud-go/issues/194 - go func() { - req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) - req.Header.Set("User-Agent", userAgent) - res, err := newDefaultHTTPClient().Do(req.WithContext(ctx)) - if err != nil { - resc <- false - return - } - defer res.Body.Close() - resc <- res.Header.Get("Metadata-Flavor") == "Google" - }() - - go func() { - resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") - if err != nil || len(addrs) == 0 { - resc <- false - return - } - resc <- strsContains(addrs, metadataIP) - }() - - tryHarder := systemInfoSuggestsGCE() - if tryHarder { - res := <-resc - if res { - // The first strategy succeeded, so let's use it. - return true - } - // Wait for either the DNS or metadata server probe to - // contradict the other one and say we are running on - // GCE. Give it a lot of time to do so, since the system - // info already suggests we're running on a GCE BIOS. - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case res = <-resc: - return res - case <-timer.C: - // Too slow. Who knows what this system is. - return false - } - } - - // There's no hint from the system info that we're running on - // GCE, so use the first probe's result as truth, whether it's - // true or false. The goal here is to optimize for speed for - // users who are NOT running on GCE. We can't assume that - // either a DNS lookup or an HTTP request to a blackholed IP - // address is fast. Worst case this should return when the - // metaClient's Transport.ResponseHeaderTimeout or - // Transport.Dial.Timeout fires (in two seconds). 
- return <-resc -} - -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. - return false - } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - -// Subscribe calls Client.Subscribe on the default client. -func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return defaultClient.Subscribe(suffix, fn) -} - -// Get calls Client.Get on the default client. -func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } - -// ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } - -// NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } - -// InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } - -// ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } - -// Email calls Client.Email on the default client. -func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } - -// Hostname returns the instance's hostname. This will be of the form -// "<instanceID>.c.<projID>.internal". -func Hostname() (string, error) { return defaultClient.Hostname() } - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } - -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } - -// InstanceName returns the current VM's instance name. -func InstanceName() (string, error) { return defaultClient.InstanceName() } - -// Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { return defaultClient.Zone() } - -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } - -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } - -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. -func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) -} - -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. -func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) -} - -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } - -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -// A Client provides metadata.
-type Client struct { - hc *http.Client -} - -// NewClient returns a Client that can be used to fetch metadata. -// Returns the client that uses the specified http.Client for HTTP requests. -// If nil is specified, returns the default client. -func NewClient(c *http.Client) *Client { - if c == nil { - return defaultClient - } - - return &Client{hc: c} -} - -// getETag returns a value from the metadata service as well as the associated ETag. -// This func is otherwise equivalent to Get. -func (c *Client) getETag(suffix string) (value, etag string, err error) { - ctx := context.TODO() - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. - host = metadataIP - } - suffix = strings.TrimLeft(suffix, "/") - u := "http://" + host + "/computeMetadata/v1/" + suffix - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return "", "", err - } - req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) - var res *http.Response - var reqErr error - retryer := newRetryer() - for { - res, reqErr = c.hc.Do(req) - var code int - if res != nil { - code = res.StatusCode - } - if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { - if err := sleep(ctx, delay); err != nil { - return "", "", err - } - continue - } - break - } - if reqErr != nil { - return "", "", reqErr - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} - } - return string(all), res.Header.Get("Etag"), nil -} - -// Get returns a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func (c *Client) Get(suffix string) (string, error) { - val, _, err := c.getETag(suffix) - return val, err -} - -func (c *Client) getTrimmed(suffix string) (s string, err error) { - s, err = c.Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} - -// ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } - -// NumericProjectID returns the current instance's numeric project ID. -func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } - -// InstanceID returns the current VM's numeric instance ID. 
-func (c *Client) InstanceID() (string, error) { return instID.get(c) } - -// InternalIP returns the instance's primary internal IP address. -func (c *Client) InternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/ip") -} - -// Email returns the email address associated with the service account. -// The account may be empty or the string "default" to use the instance's -// main account. -func (c *Client) Email(serviceAccount string) (string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email") -} - -// ExternalIP returns the instance's primary external (public) IP address. -func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") -} - -// Hostname returns the instance's hostname. This will be of the form -// "<instanceID>.c.<projID>.internal". -func (c *Client) Hostname() (string, error) { - return c.getTrimmed("instance/hostname") -} - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func (c *Client) InstanceTags() ([]string, error) { - var s []string - j, err := c.Get("instance/tags") - if err != nil { - return nil, err - } - if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { - return nil, err - } - return s, nil -} - -// InstanceName returns the current VM's instance name. -func (c *Client) InstanceName() (string, error) { - return c.getTrimmed("instance/name") -} - -// Zone returns the current VM's zone, such as "us-central1-b". -func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed("instance/zone") - // zone is of the form "projects/<projNum>/zones/<zoneName>". - if err != nil { - return "", err - } - return zone[strings.LastIndex(zone, "/")+1:], nil -} - -// InstanceAttributes returns the list of user-defined attributes, -// assigned when initially creating a GCE VM instance. The value of an -// attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } - -// ProjectAttributes returns the list of user-defined attributes -// applying to the project as a whole, not just this VM. The value of -// an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } - -// InstanceAttributeValue returns the value of the provided VM -// instance attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account.
-func (c *Client) Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") -} - -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// The suffix may contain query parameters. -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. -func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 - - // First check to see if the metadata value exists at all. - val, lastETag, err := c.getETag(suffix) - if err != nil { - return err - } - - if err := fn(val, true); err != nil { - return err - } - - ok := true - if strings.ContainsRune(suffix, '?') { - suffix += "&wait_for_change=true&last_etag=" - } else { - suffix += "?wait_for_change=true&last_etag=" - } - for { - val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. - } - ok = false - } - lastETag = etag - - if err := fn(val, ok); err != nil || !ok { - return err - } - } -} - -// Error contains an error response from the server. -type Error struct { - // Code is the HTTP response status code. - Code int - // Message is the server response message. - Message string -} - -func (e *Error) Error() string { - return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) -} diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go deleted file mode 100644 index 0f18f3cda1..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/retry.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metadata - -import ( - "context" - "io" - "math/rand" - "net/http" - "time" -) - -const ( - maxRetryAttempts = 5 -) - -var ( - syscallRetryable = func(err error) bool { return false } -) - -// defaultBackoff is basically equivalent to gax.Backoff without the need for -// the dependency. -type defaultBackoff struct { - max time.Duration - mul float64 - cur time.Duration -} - -func (b *defaultBackoff) Pause() time.Duration { - d := time.Duration(1 + rand.Int63n(int64(b.cur))) - b.cur = time.Duration(float64(b.cur) * b.mul) - if b.cur > b.max { - b.cur = b.max - } - return d -} - -// sleep is the equivalent of gax.Sleep without the need for the dependency. 
-func sleep(ctx context.Context, d time.Duration) error { - t := time.NewTimer(d) - select { - case <-ctx.Done(): - t.Stop() - return ctx.Err() - case <-t.C: - return nil - } -} - -func newRetryer() *metadataRetryer { - return &metadataRetryer{bo: &defaultBackoff{ - cur: 100 * time.Millisecond, - max: 30 * time.Second, - mul: 2, - }} -} - -type backoff interface { - Pause() time.Duration -} - -type metadataRetryer struct { - bo backoff - attempts int -} - -func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) { - if status == http.StatusOK { - return 0, false - } - retryOk := shouldRetry(status, err) - if !retryOk { - return 0, false - } - if r.attempts == maxRetryAttempts { - return 0, false - } - r.attempts++ - return r.bo.Pause(), true -} - -func shouldRetry(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - // Transient network errors should be retried. - if syscallRetryable(err) { - return true - } - if err, ok := err.(interface{ Temporary() bool }); ok { - if err.Temporary() { - return true - } - } - if err, ok := err.(interface{ Unwrap() error }); ok { - return shouldRetry(status, err.Unwrap()) - } - return false -} diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go deleted file mode 100644 index bb412f8917..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux -// +build linux - -package metadata - -import "syscall" - -func init() { - // Initialize syscallRetryable to return true on transient socket-level - // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } -} diff --git a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go deleted file mode 100644 index 4cef485008..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the {{.RootMod}} import, won't actually become part of -// the resultant binary. -//go:build modhack -// +build modhack - -package metadata - -// Necessary for safely adding multi-module repo. 
See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go/compute/internal" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt deleted file mode 100644 index 05b0ebf5bc..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) Microsoft Corporation. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt deleted file mode 100644 index a338672ec5..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt +++ /dev/null @@ -1,29 +0,0 @@ -NOTICES AND INFORMATION -Do Not Translate or Localize - -This software incorporates material from third parties. Microsoft makes certain -open source code available at https://3rdpartysource.microsoft.com, or you may -send a check or money order for US $5.00, including the product name, the open -source component name, and version number, to: - -Source Code Compliance Team -Microsoft Corporation -One Microsoft Way -Redmond, WA 98052 -USA - -Notwithstanding any other terms, you may reverse engineer this software to the -extent required to debug changes to any libraries licensed under the GNU Lesser -General Public License. - ------------------------------------------------------------------------------- - -Azure SDK for Go uses third-party libraries or other resources that may be -distributed under licenses different than the Azure SDK for Go software. - -In the event that we accidentally failed to list a required notice, please -bring it to our attention. Post an issue or email us: - - azgosdkhelp@microsoft.com - -The attached notices are provided for information only. 
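Next, the diff removes the generated Azure Container Registry runtime client. As a rough usage sketch only: exchanging an ACR refresh token for a scoped access token with the AccessTokensClient deleted below (NewAccessTokensClient and Get match the signatures in the removed source); the registry name, scope, and environment variable are placeholders, and the AccessToken field name follows the generated models as an assumption.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry"
)

func main() {
	ctx := context.Background()

	// The client is rooted at the registry's login URI.
	client := containerregistry.NewAccessTokensClient("https://myregistry.azurecr.io") // hypothetical registry

	// Exchange an ACR refresh token for an access token scoped to one
	// repository; the scope string normally comes from the registry's
	// Www-Authenticate challenge.
	token, err := client.Get(ctx,
		"myregistry.azurecr.io",        // service: the registry host
		"repository:hello-world:pull",  // scope (hypothetical)
		os.Getenv("ACR_REFRESH_TOKEN"), // refresh token (assumed env var)
	)
	if err != nil {
		log.Fatal(err)
	}
	// Token field name per the generated models (assumption).
	if token.AccessToken != nil {
		fmt.Println("received access token,", len(*token.AccessToken), "bytes")
	}
}
```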
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/CHANGELOG.md deleted file mode 100644 index 52911e4cc5..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/CHANGELOG.md +++ /dev/null @@ -1,2 +0,0 @@ -# Change History - diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/accesstokens.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/accesstokens.go deleted file mode 100644 index e9bc66a397..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/accesstokens.go +++ /dev/null @@ -1,178 +0,0 @@ -package containerregistry - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// AccessTokensClient is the metadata API definition for the Azure Container Registry runtime -type AccessTokensClient struct { - BaseClient -} - -// NewAccessTokensClient creates an instance of the AccessTokensClient client. -func NewAccessTokensClient(loginURI string) AccessTokensClient { - return AccessTokensClient{New(loginURI)} -} - -// Get exchange ACR Refresh token for an ACR Access Token -// Parameters: -// service - indicates the name of your Azure container registry. -// scope - which is expected to be a valid scope, and can be specified more than once for multiple scope -// requests. You obtained this from the Www-Authenticate response header from the challenge. -// refreshToken - must be a valid ACR refresh token -func (client AccessTokensClient) Get(ctx context.Context, service string, scope string, refreshToken string) (result AccessToken, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccessTokensClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, service, scope, refreshToken) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.AccessTokensClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.AccessTokensClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.AccessTokensClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client AccessTokensClient) GetPreparer(ctx context.Context, service string, scope string, refreshToken string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - formDataParameters := map[string]interface{}{ - "grant_type": "refresh_token", - "refresh_token": refreshToken, - "scope": scope, - "service": service, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPath("/oauth2/token"), - autorest.WithFormData(autorest.MapToValues(formDataParameters))) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client AccessTokensClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client AccessTokensClient) GetResponder(resp *http.Response) (result AccessToken, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetFromLogin exchange Username, Password and Scope an ACR Access Token -// Parameters: -// service - indicates the name of your Azure container registry. -// scope - expected to be a valid scope, and can be specified more than once for multiple scope requests. You -// can obtain this from the Www-Authenticate response header from the challenge. -func (client AccessTokensClient) GetFromLogin(ctx context.Context, service string, scope string) (result AccessToken, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccessTokensClient.GetFromLogin") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetFromLoginPreparer(ctx, service, scope) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.AccessTokensClient", "GetFromLogin", nil, "Failure preparing request") - return - } - - resp, err := client.GetFromLoginSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.AccessTokensClient", "GetFromLogin", resp, "Failure sending request") - return - } - - result, err = client.GetFromLoginResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.AccessTokensClient", "GetFromLogin", resp, "Failure responding to request") - return - } - - return -} - -// GetFromLoginPreparer prepares the GetFromLogin request. 
-func (client AccessTokensClient) GetFromLoginPreparer(ctx context.Context, service string, scope string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - queryParameters := map[string]interface{}{ - "scope": autorest.Encode("query", scope), - "service": autorest.Encode("query", service), - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPath("/oauth2/token"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetFromLoginSender sends the GetFromLogin request. The method will close the -// http.Response Body if it receives an error. -func (client AccessTokensClient) GetFromLoginSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetFromLoginResponder handles the response to the GetFromLogin request. The method always -// closes the http.Response Body. -func (client AccessTokensClient) GetFromLoginResponder(resp *http.Response) (result AccessToken, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/blob.go deleted file mode 100644 index da512a659e..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/blob.go +++ /dev/null @@ -1,842 +0,0 @@ -package containerregistry - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "io" - "net/http" -) - -// BlobClient is the metadata API definition for the Azure Container Registry runtime -type BlobClient struct { - BaseClient -} - -// NewBlobClient creates an instance of the BlobClient client. -func NewBlobClient(loginURI string) BlobClient { - return BlobClient{New(loginURI)} -} - -// CancelUpload cancel outstanding upload processes, releasing associated resources. If this is not called, the -// unfinished uploads will eventually timeout. -// Parameters: -// location - link acquired from upload start or previous chunk. 
Note, do not include initial / (must do -// substring(1) ) -func (client BlobClient) CancelUpload(ctx context.Context, location string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.CancelUpload") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.CancelUploadPreparer(ctx, location) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "CancelUpload", nil, "Failure preparing request") - return - } - - resp, err := client.CancelUploadSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "CancelUpload", resp, "Failure sending request") - return - } - - result, err = client.CancelUploadResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "CancelUpload", resp, "Failure responding to request") - return - } - - return -} - -// CancelUploadPreparer prepares the CancelUpload request. -func (client BlobClient) CancelUploadPreparer(ctx context.Context, location string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "nextBlobUuidLink": location, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/{nextBlobUuidLink}", pathParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CancelUploadSender sends the CancelUpload request. The method will close the -// http.Response Body if it receives an error. -func (client BlobClient) CancelUploadSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CancelUploadResponder handles the response to the CancelUpload request. The method always -// closes the http.Response Body. -func (client BlobClient) CancelUploadResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Check same as GET, except only the headers are returned. 
-// Parameters: -// name - name of the image (including the namespace) -// digest - digest of a BLOB -func (client BlobClient) Check(ctx context.Context, name string, digest string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.Check") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.CheckPreparer(ctx, name, digest) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Check", nil, "Failure preparing request") - return - } - - resp, err := client.CheckSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Check", resp, "Failure sending request") - return - } - - result, err = client.CheckResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Check", resp, "Failure responding to request") - return - } - - return -} - -// CheckPreparer prepares the Check request. -func (client BlobClient) CheckPreparer(ctx context.Context, name string, digest string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "digest": autorest.Encode("path", digest), - "name": autorest.Encode("path", name), - } - - preparer := autorest.CreatePreparer( - autorest.AsHead(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/v2/{name}/blobs/{digest}", pathParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CheckSender sends the Check request. The method will close the -// http.Response Body if it receives an error. -func (client BlobClient) CheckSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CheckResponder handles the response to the Check request. The method always -// closes the http.Response Body. -func (client BlobClient) CheckResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusTemporaryRedirect), - autorest.ByClosing()) - result.Response = resp - return -} - -// CheckChunk same as GET, except only the headers are returned. -// Parameters: -// name - name of the image (including the namespace) -// digest - digest of a BLOB -// rangeParameter - format : bytes=<start>-<end>, HTTP Range header specifying blob chunk.
-func (client BlobClient) CheckChunk(ctx context.Context, name string, digest string, rangeParameter string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.CheckChunk") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.CheckChunkPreparer(ctx, name, digest, rangeParameter) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "CheckChunk", nil, "Failure preparing request") - return - } - - resp, err := client.CheckChunkSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "CheckChunk", resp, "Failure sending request") - return - } - - result, err = client.CheckChunkResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "CheckChunk", resp, "Failure responding to request") - return - } - - return -} - -// CheckChunkPreparer prepares the CheckChunk request. -func (client BlobClient) CheckChunkPreparer(ctx context.Context, name string, digest string, rangeParameter string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "digest": autorest.Encode("path", digest), - "name": autorest.Encode("path", name), - } - - preparer := autorest.CreatePreparer( - autorest.AsHead(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/v2/{name}/blobs/{digest}", pathParameters), - autorest.WithHeader("Range", autorest.String(rangeParameter))) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CheckChunkSender sends the CheckChunk request. The method will close the -// http.Response Body if it receives an error. -func (client BlobClient) CheckChunkSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CheckChunkResponder handles the response to the CheckChunk request. The method always -// closes the http.Response Body. -func (client BlobClient) CheckChunkResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Delete removes an already uploaded blob. 
-// Parameters: -// name - name of the image (including the namespace) -// digest - digest of a BLOB -func (client BlobClient) Delete(ctx context.Context, name string, digest string) (result ReadCloser, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.Delete") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, name, digest) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client BlobClient) DeletePreparer(ctx context.Context, name string, digest string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "digest": autorest.Encode("path", digest), - "name": autorest.Encode("path", name), - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/v2/{name}/blobs/{digest}", pathParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client BlobClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client BlobClient) DeleteResponder(resp *http.Response) (result ReadCloser, err error) { - result.Value = &resp.Body - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - result.Response = autorest.Response{Response: resp} - return -} - -// EndUpload complete the upload, providing all the data in the body, if necessary. A request without a body will just -// complete the upload with previously uploaded content. -// Parameters: -// digest - digest of a BLOB -// location - link acquired from upload start or previous chunk. 
Note, do not include initial / (must do -// substring(1) ) -// value - optional raw data of blob -func (client BlobClient) EndUpload(ctx context.Context, digest string, location string, value io.ReadCloser) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.EndUpload") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.EndUploadPreparer(ctx, digest, location, value) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "EndUpload", nil, "Failure preparing request") - return - } - - resp, err := client.EndUploadSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "EndUpload", resp, "Failure sending request") - return - } - - result, err = client.EndUploadResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "EndUpload", resp, "Failure responding to request") - return - } - - return -} - -// EndUploadPreparer prepares the EndUpload request. -func (client BlobClient) EndUploadPreparer(ctx context.Context, digest string, location string, value io.ReadCloser) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "nextBlobUuidLink": location, - } - - queryParameters := map[string]interface{}{ - "digest": autorest.Encode("query", digest), - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/octet-stream"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/{nextBlobUuidLink}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if value != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithFile(value)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// EndUploadSender sends the EndUpload request. The method will close the -// http.Response Body if it receives an error. -func (client BlobClient) EndUploadSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// EndUploadResponder handles the response to the EndUpload request. The method always -// closes the http.Response Body. -func (client BlobClient) EndUploadResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get retrieve the blob from the registry identified by digest. 
-// Parameters: -// name - name of the image (including the namespace) -// digest - digest of a BLOB -func (client BlobClient) Get(ctx context.Context, name string, digest string) (result ReadCloser, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, name, digest) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client BlobClient) GetPreparer(ctx context.Context, name string, digest string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "digest": autorest.Encode("path", digest), - "name": autorest.Encode("path", name), - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/v2/{name}/blobs/{digest}", pathParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client BlobClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client BlobClient) GetResponder(resp *http.Response) (result ReadCloser, err error) { - result.Value = &resp.Body - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusTemporaryRedirect)) - result.Response = autorest.Response{Response: resp} - return -} - -// GetChunk retrieve the blob from the registry identified by `digest`. This endpoint may also support RFC7233 -// compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is -// returned, range requests can be used to fetch partial content. -// Parameters: -// name - name of the image (including the namespace) -// digest - digest of a BLOB -// rangeParameter - format : bytes=<start>-<end>, HTTP Range header specifying blob chunk.
-func (client BlobClient) GetChunk(ctx context.Context, name string, digest string, rangeParameter string) (result ReadCloser, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.GetChunk") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetChunkPreparer(ctx, name, digest, rangeParameter) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "GetChunk", nil, "Failure preparing request") - return - } - - resp, err := client.GetChunkSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "GetChunk", resp, "Failure sending request") - return - } - - result, err = client.GetChunkResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "GetChunk", resp, "Failure responding to request") - return - } - - return -} - -// GetChunkPreparer prepares the GetChunk request. -func (client BlobClient) GetChunkPreparer(ctx context.Context, name string, digest string, rangeParameter string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "digest": autorest.Encode("path", digest), - "name": autorest.Encode("path", name), - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/v2/{name}/blobs/{digest}", pathParameters), - autorest.WithHeader("Range", autorest.String(rangeParameter))) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetChunkSender sends the GetChunk request. The method will close the -// http.Response Body if it receives an error. -func (client BlobClient) GetChunkSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetChunkResponder handles the response to the GetChunk request. The method always -// closes the http.Response Body. -func (client BlobClient) GetChunkResponder(resp *http.Response) (result ReadCloser, err error) { - result.Value = &resp.Body - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusPartialContent)) - result.Response = autorest.Response{Response: resp} - return -} - -// GetStatus retrieve status of upload identified by uuid. The primary purpose of this endpoint is to resolve the -// current status of a resumable upload. -// Parameters: -// location - link acquired from upload start or previous chunk. 
Note, do not include initial / (must do -// substring(1) ) -func (client BlobClient) GetStatus(ctx context.Context, location string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.GetStatus") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetStatusPreparer(ctx, location) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "GetStatus", nil, "Failure preparing request") - return - } - - resp, err := client.GetStatusSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "GetStatus", resp, "Failure sending request") - return - } - - result, err = client.GetStatusResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "GetStatus", resp, "Failure responding to request") - return - } - - return -} - -// GetStatusPreparer prepares the GetStatus request. -func (client BlobClient) GetStatusPreparer(ctx context.Context, location string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "nextBlobUuidLink": location, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/{nextBlobUuidLink}", pathParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetStatusSender sends the GetStatus request. The method will close the -// http.Response Body if it receives an error. -func (client BlobClient) GetStatusSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetStatusResponder handles the response to the GetStatus request. The method always -// closes the http.Response Body. -func (client BlobClient) GetStatusResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Mount mount a blob identified by the `mount` parameter from another repository. -// Parameters: -// name - name of the image (including the namespace) -// from - name of the source repository. -// mount - digest of blob to mount from the source repository. 
-func (client BlobClient) Mount(ctx context.Context, name string, from string, mount string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.Mount") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.MountPreparer(ctx, name, from, mount) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Mount", nil, "Failure preparing request") - return - } - - resp, err := client.MountSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Mount", resp, "Failure sending request") - return - } - - result, err = client.MountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Mount", resp, "Failure responding to request") - return - } - - return -} - -// MountPreparer prepares the Mount request. -func (client BlobClient) MountPreparer(ctx context.Context, name string, from string, mount string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - } - - queryParameters := map[string]interface{}{ - "from": autorest.Encode("query", from), - "mount": autorest.Encode("query", mount), - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/v2/{name}/blobs/uploads/", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// MountSender sends the Mount request. The method will close the -// http.Response Body if it receives an error. -func (client BlobClient) MountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// MountResponder handles the response to the Mount request. The method always -// closes the http.Response Body. -func (client BlobClient) MountResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByClosing()) - result.Response = resp - return -} - -// StartUpload initiate a resumable blob upload with an empty request body. 
-// Parameters: -// name - name of the image (including the namespace) -func (client BlobClient) StartUpload(ctx context.Context, name string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.StartUpload") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.StartUploadPreparer(ctx, name) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "StartUpload", nil, "Failure preparing request") - return - } - - resp, err := client.StartUploadSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "StartUpload", resp, "Failure sending request") - return - } - - result, err = client.StartUploadResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "StartUpload", resp, "Failure responding to request") - return - } - - return -} - -// StartUploadPreparer prepares the StartUpload request. -func (client BlobClient) StartUploadPreparer(ctx context.Context, name string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/v2/{name}/blobs/uploads/", pathParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// StartUploadSender sends the StartUpload request. The method will close the -// http.Response Body if it receives an error. -func (client BlobClient) StartUploadSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// StartUploadResponder handles the response to the StartUpload request. The method always -// closes the http.Response Body. -func (client BlobClient) StartUploadResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByClosing()) - result.Response = resp - return -} - -// Upload upload a stream of data without completing the upload. -// Parameters: -// value - raw data of blob -// location - link acquired from upload start or previous chunk. 
Note, do not include initial / (must do -// substring(1) ) -func (client BlobClient) Upload(ctx context.Context, value io.ReadCloser, location string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.Upload") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UploadPreparer(ctx, value, location) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Upload", nil, "Failure preparing request") - return - } - - resp, err := client.UploadSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Upload", resp, "Failure sending request") - return - } - - result, err = client.UploadResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Upload", resp, "Failure responding to request") - return - } - - return -} - -// UploadPreparer prepares the Upload request. -func (client BlobClient) UploadPreparer(ctx context.Context, value io.ReadCloser, location string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "nextBlobUuidLink": location, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/octet-stream"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/{nextBlobUuidLink}", pathParameters), - autorest.WithFile(value)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UploadSender sends the Upload request. The method will close the -// http.Response Body if it receives an error. -func (client BlobClient) UploadSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UploadResponder handles the response to the Upload request. The method always -// closes the http.Response Body. -func (client BlobClient) UploadResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByClosing()) - result.Response = resp - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/client.go deleted file mode 100644 index d0ca3cef41..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/client.go +++ /dev/null @@ -1,33 +0,0 @@ -// Package containerregistry implements the Azure ARM Containerregistry service API version 2019-08-15-preview. -// -// Metadata API definition for the Azure Container Registry runtime -package containerregistry - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
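Aside: the BlobClient methods deleted above are a thin wrapper over the Docker Registry HTTP API v2 resumable-upload protocol. StartUpload POSTs to /v2/{name}/blobs/uploads/ and expects a 202 whose Location header names the upload session; Upload PATCHes raw application/octet-stream chunks to that location; GetStatus polls the same URL; and Mount short-circuits the whole exchange by copying an existing blob from another repository. The "do not include initial /" notes exist because the generated preparers prepend a slash to the {nextBlobUuidLink} path parameter themselves. A minimal sketch of the start-then-PATCH handshake in plain net/http, assuming registryURL and repo as placeholder inputs and omitting auth and the final PUT ?digest=... that completes an upload:

package sketch

import (
	"bytes"
	"fmt"
	"net/http"
)

func uploadChunk(registryURL, repo string, chunk []byte) error {
	// StartUpload: POST /v2/{name}/blobs/uploads/ -> 202 + Location header.
	resp, err := http.Post(registryURL+"/v2/"+repo+"/blobs/uploads/", "", nil)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("start upload: unexpected status %d", resp.StatusCode)
	}
	// Location is assumed here to begin with "/", matching the
	// substring(1) note in the deleted doc comments.
	location := resp.Header.Get("Location")

	// Upload: PATCH one raw chunk to the returned session URL.
	req, err := http.NewRequest(http.MethodPatch, registryURL+location, bytes.NewReader(chunk))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	resp2, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp2.Body.Close()
	if resp2.StatusCode != http.StatusAccepted {
		return fmt.Errorf("upload chunk: unexpected status %d", resp2.StatusCode)
	}
	return nil
}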
- -import ( - "github.com/Azure/go-autorest/autorest" -) - -// BaseClient is the base client for Containerregistry. -type BaseClient struct { - autorest.Client - LoginURI string -} - -// New creates an instance of the BaseClient client. -func New(loginURI string) BaseClient { - return NewWithoutDefaults(loginURI) -} - -// NewWithoutDefaults creates an instance of the BaseClient client. -func NewWithoutDefaults(loginURI string) BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - LoginURI: loginURI, - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/dataplane_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/dataplane_meta.json deleted file mode 100644 index dc0691372f..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/dataplane_meta.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", - "readme": "/_/azure-rest-api-specs/specification/containerregistry/data-plane/readme.md", - "tag": "package-2019-08", - "use": "@microsoft.azure/autorest.go@2.1.183", - "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.183 --tag=package-2019-08 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/containerregistry/data-plane/readme.md", - "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" - } -} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/manifests.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/manifests.go deleted file mode 100644 index 6451437886..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/manifests.go +++ /dev/null @@ -1,491 +0,0 @@ -package containerregistry - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// ManifestsClient is the metadata API definition for the Azure Container Registry runtime -type ManifestsClient struct { - BaseClient -} - -// NewManifestsClient creates an instance of the ManifestsClient client. -func NewManifestsClient(loginURI string) ManifestsClient { - return ManifestsClient{New(loginURI)} -} - -// Create put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
-// Parameters: -// name - name of the image (including the namespace) -// reference - a tag or a digest, pointing to a specific image -// payload - manifest body, can take v1 or v2 values depending on accept header -func (client ManifestsClient) Create(ctx context.Context, name string, reference string, payload Manifest) (result SetObject, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManifestsClient.Create") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.CreatePreparer(ctx, name, reference, payload) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Create", nil, "Failure preparing request") - return - } - - resp, err := client.CreateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Create", resp, "Failure sending request") - return - } - - result, err = client.CreateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Create", resp, "Failure responding to request") - return - } - - return -} - -// CreatePreparer prepares the Create request. -func (client ManifestsClient) CreatePreparer(ctx context.Context, name string, reference string, payload Manifest) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "reference": autorest.Encode("path", reference), - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/vnd.docker.distribution.manifest.v2+json"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/v2/{name}/manifests/{reference}", pathParameters), - autorest.WithJSON(payload)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateSender sends the Create request. The method will close the -// http.Response Body if it receives an error. -func (client ManifestsClient) CreateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CreateResponder handles the response to the Create request. The method always -// closes the http.Response Body. -func (client ManifestsClient) CreateResponder(resp *http.Response) (result SetObject, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result.Value), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by -// `digest`. 
-// Parameters: -// name - name of the image (including the namespace) -// reference - a tag or a digest, pointing to a specific image -func (client ManifestsClient) Delete(ctx context.Context, name string, reference string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManifestsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, name, reference) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client ManifestsClient) DeletePreparer(ctx context.Context, name string, reference string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "reference": autorest.Encode("path", reference), - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/v2/{name}/manifests/{reference}", pathParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client ManifestsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client ManifestsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get get the manifest identified by `name` and `reference` where `reference` can be a tag or digest. -// Parameters: -// name - name of the image (including the namespace) -// reference - a tag or a digest, pointing to a specific image -// accept - accept header string delimited by comma. 
For example, -// application/vnd.docker.distribution.manifest.v2+json -func (client ManifestsClient) Get(ctx context.Context, name string, reference string, accept string) (result ManifestWrapper, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManifestsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, name, reference, accept) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client ManifestsClient) GetPreparer(ctx context.Context, name string, reference string, accept string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "reference": autorest.Encode("path", reference), - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/v2/{name}/manifests/{reference}", pathParameters)) - if len(accept) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("accept", autorest.String(accept))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ManifestsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
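Aside: the Get/GetPreparer pair above is plain content negotiation. Whatever media types the caller passes in the accept parameter land in the request's Accept header, and the registry replies with the matching manifest schema, which is why the ManifestWrapper model carries fields for every schema at once. A minimal sketch of the same request without the autorest plumbing, with registryURL, repo, and ref as placeholders and authentication omitted:

package sketch

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func getManifest(registryURL, repo, ref string) (map[string]interface{}, error) {
	req, err := http.NewRequest(http.MethodGet,
		registryURL+"/v2/"+repo+"/manifests/"+ref, nil)
	if err != nil {
		return nil, err
	}
	// The registry picks the returned schema from this header.
	req.Header.Set("Accept", "application/vnd.docker.distribution.manifest.v2+json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("get manifest: unexpected status %d", resp.StatusCode)
	}
	var m map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
		return nil, err
	}
	return m, nil
}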
-func (client ManifestsClient) GetResponder(resp *http.Response) (result ManifestWrapper, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetAttributes get manifest attributes -// Parameters: -// name - name of the image (including the namespace) -// reference - a tag or a digest, pointing to a specific image -func (client ManifestsClient) GetAttributes(ctx context.Context, name string, reference string) (result ManifestAttributes, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManifestsClient.GetAttributes") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetAttributesPreparer(ctx, name, reference) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "GetAttributes", nil, "Failure preparing request") - return - } - - resp, err := client.GetAttributesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "GetAttributes", resp, "Failure sending request") - return - } - - result, err = client.GetAttributesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "GetAttributes", resp, "Failure responding to request") - return - } - - return -} - -// GetAttributesPreparer prepares the GetAttributes request. -func (client ManifestsClient) GetAttributesPreparer(ctx context.Context, name string, reference string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "reference": autorest.Encode("path", reference), - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/acr/v1/{name}/_manifests/{reference}", pathParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetAttributesSender sends the GetAttributes request. The method will close the -// http.Response Body if it receives an error. -func (client ManifestsClient) GetAttributesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetAttributesResponder handles the response to the GetAttributes request. The method always -// closes the http.Response Body. -func (client ManifestsClient) GetAttributesResponder(resp *http.Response) (result ManifestAttributes, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetList list manifests of a repository -// Parameters: -// name - name of the image (including the namespace) -// last - query parameter for the last item in previous query. Result set will include values lexically after -// last. 
-// n - query parameter for max number of items -// orderby - orderby query parameter -func (client ManifestsClient) GetList(ctx context.Context, name string, last string, n *int32, orderby string) (result AcrManifests, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManifestsClient.GetList") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetListPreparer(ctx, name, last, n, orderby) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "GetList", nil, "Failure preparing request") - return - } - - resp, err := client.GetListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "GetList", resp, "Failure sending request") - return - } - - result, err = client.GetListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "GetList", resp, "Failure responding to request") - return - } - - return -} - -// GetListPreparer prepares the GetList request. -func (client ManifestsClient) GetListPreparer(ctx context.Context, name string, last string, n *int32, orderby string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - } - - queryParameters := map[string]interface{}{} - if len(last) > 0 { - queryParameters["last"] = autorest.Encode("query", last) - } - if n != nil { - queryParameters["n"] = autorest.Encode("query", *n) - } - if len(orderby) > 0 { - queryParameters["orderby"] = autorest.Encode("query", orderby) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/acr/v1/{name}/_manifests", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetListSender sends the GetList request. The method will close the -// http.Response Body if it receives an error. -func (client ManifestsClient) GetListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetListResponder handles the response to the GetList request. The method always -// closes the http.Response Body. 
-func (client ManifestsClient) GetListResponder(resp *http.Response) (result AcrManifests, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateAttributes update attributes of a manifest -// Parameters: -// name - name of the image (including the namespace) -// reference - a tag or a digest, pointing to a specific image -// value - repository attribute value -func (client ManifestsClient) UpdateAttributes(ctx context.Context, name string, reference string, value *ChangeableAttributes) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManifestsClient.UpdateAttributes") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateAttributesPreparer(ctx, name, reference, value) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "UpdateAttributes", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateAttributesSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "UpdateAttributes", resp, "Failure sending request") - return - } - - result, err = client.UpdateAttributesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "UpdateAttributes", resp, "Failure responding to request") - return - } - - return -} - -// UpdateAttributesPreparer prepares the UpdateAttributes request. -func (client ManifestsClient) UpdateAttributesPreparer(ctx context.Context, name string, reference string, value *ChangeableAttributes) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "reference": autorest.Encode("path", reference), - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/acr/v1/{name}/_manifests/{reference}", pathParameters)) - if value != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(value)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateAttributesSender sends the UpdateAttributes request. The method will close the -// http.Response Body if it receives an error. -func (client ManifestsClient) UpdateAttributesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateAttributesResponder handles the response to the UpdateAttributes request. The method always -// closes the http.Response Body. 
-func (client ManifestsClient) UpdateAttributesResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/models.go deleted file mode 100644 index 5ae4916064..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/models.go +++ /dev/null @@ -1,628 +0,0 @@ -package containerregistry - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/date" - "io" -) - -// The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry" - -// AccessToken ... -type AccessToken struct { - autorest.Response `json:"-"` - // AccessToken - The access token for performing authenticated requests - AccessToken *string `json:"access_token,omitempty"` -} - -// AcrErrorInfo error information -type AcrErrorInfo struct { - // Code - Error code - Code *string `json:"code,omitempty"` - // Message - Error message - Message *string `json:"message,omitempty"` - // Detail - Error details - Detail interface{} `json:"detail,omitempty"` -} - -// AcrErrors acr error response describing why the operation failed -type AcrErrors struct { - // Errors - Array of detailed error - Errors *[]AcrErrorInfo `json:"errors,omitempty"` -} - -// AcrManifests manifest attributes -type AcrManifests struct { - autorest.Response `json:"-"` - // Registry - Registry name - Registry *string `json:"registry,omitempty"` - // ImageName - Image name - ImageName *string `json:"imageName,omitempty"` - // ManifestsAttributes - List of manifests - ManifestsAttributes *[]ManifestAttributesBase `json:"manifests,omitempty"` -} - -// Annotations additional information provided through arbitrary metadata. -type Annotations struct { - // AdditionalProperties - Unmatched properties from the message are deserialized this collection - AdditionalProperties map[string]interface{} `json:""` - // Created - Date and time on which the image was built (string, date-time as defined by https://tools.ietf.org/html/rfc3339#section-5.6) - Created *date.Time `json:"org.opencontainers.image.created,omitempty"` - // Authors - Contact details of the people or organization responsible for the image. - Authors *string `json:"org.opencontainers.image.authors,omitempty"` - // URL - URL to find more information on the image. - URL *string `json:"org.opencontainers.image.url,omitempty"` - // Documentation - URL to get documentation on the image. - Documentation *string `json:"org.opencontainers.image.documentation,omitempty"` - // Source - URL to get source code for building the image. 
- Source *string `json:"org.opencontainers.image.source,omitempty"` - // Version - Version of the packaged software. The version MAY match a label or tag in the source code repository, may also be Semantic versioning-compatible - Version *string `json:"org.opencontainers.image.version,omitempty"` - // Revision - Source control revision identifier for the packaged software. - Revision *string `json:"org.opencontainers.image.revision,omitempty"` - // Vendor - Name of the distributing entity, organization or individual. - Vendor *string `json:"org.opencontainers.image.vendor,omitempty"` - // Licenses - License(s) under which contained software is distributed as an SPDX License Expression. - Licenses *string `json:"org.opencontainers.image.licenses,omitempty"` - // Name - Name of the reference for a target. - Name *string `json:"org.opencontainers.image.ref.name,omitempty"` - // Title - Human-readable title of the image - Title *string `json:"org.opencontainers.image.title,omitempty"` - // Description - Human-readable description of the software packaged in the image - Description *string `json:"org.opencontainers.image.description,omitempty"` -} - -// MarshalJSON is the custom marshaler for Annotations. -func (a Annotations) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if a.Created != nil { - objectMap["org.opencontainers.image.created"] = a.Created - } - if a.Authors != nil { - objectMap["org.opencontainers.image.authors"] = a.Authors - } - if a.URL != nil { - objectMap["org.opencontainers.image.url"] = a.URL - } - if a.Documentation != nil { - objectMap["org.opencontainers.image.documentation"] = a.Documentation - } - if a.Source != nil { - objectMap["org.opencontainers.image.source"] = a.Source - } - if a.Version != nil { - objectMap["org.opencontainers.image.version"] = a.Version - } - if a.Revision != nil { - objectMap["org.opencontainers.image.revision"] = a.Revision - } - if a.Vendor != nil { - objectMap["org.opencontainers.image.vendor"] = a.Vendor - } - if a.Licenses != nil { - objectMap["org.opencontainers.image.licenses"] = a.Licenses - } - if a.Name != nil { - objectMap["org.opencontainers.image.ref.name"] = a.Name - } - if a.Title != nil { - objectMap["org.opencontainers.image.title"] = a.Title - } - if a.Description != nil { - objectMap["org.opencontainers.image.description"] = a.Description - } - for k, v := range a.AdditionalProperties { - objectMap[k] = v - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Annotations struct. 
-func (a *Annotations) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - default: - if v != nil { - var additionalProperties interface{} - err = json.Unmarshal(*v, &additionalProperties) - if err != nil { - return err - } - if a.AdditionalProperties == nil { - a.AdditionalProperties = make(map[string]interface{}) - } - a.AdditionalProperties[k] = additionalProperties - } - case "org.opencontainers.image.created": - if v != nil { - var created date.Time - err = json.Unmarshal(*v, &created) - if err != nil { - return err - } - a.Created = &created - } - case "org.opencontainers.image.authors": - if v != nil { - var authors string - err = json.Unmarshal(*v, &authors) - if err != nil { - return err - } - a.Authors = &authors - } - case "org.opencontainers.image.url": - if v != nil { - var URL string - err = json.Unmarshal(*v, &URL) - if err != nil { - return err - } - a.URL = &URL - } - case "org.opencontainers.image.documentation": - if v != nil { - var documentation string - err = json.Unmarshal(*v, &documentation) - if err != nil { - return err - } - a.Documentation = &documentation - } - case "org.opencontainers.image.source": - if v != nil { - var source string - err = json.Unmarshal(*v, &source) - if err != nil { - return err - } - a.Source = &source - } - case "org.opencontainers.image.version": - if v != nil { - var version string - err = json.Unmarshal(*v, &version) - if err != nil { - return err - } - a.Version = &version - } - case "org.opencontainers.image.revision": - if v != nil { - var revision string - err = json.Unmarshal(*v, &revision) - if err != nil { - return err - } - a.Revision = &revision - } - case "org.opencontainers.image.vendor": - if v != nil { - var vendor string - err = json.Unmarshal(*v, &vendor) - if err != nil { - return err - } - a.Vendor = &vendor - } - case "org.opencontainers.image.licenses": - if v != nil { - var licenses string - err = json.Unmarshal(*v, &licenses) - if err != nil { - return err - } - a.Licenses = &licenses - } - case "org.opencontainers.image.ref.name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - a.Name = &name - } - case "org.opencontainers.image.title": - if v != nil { - var title string - err = json.Unmarshal(*v, &title) - if err != nil { - return err - } - a.Title = &title - } - case "org.opencontainers.image.description": - if v != nil { - var description string - err = json.Unmarshal(*v, &description) - if err != nil { - return err - } - a.Description = &description - } - } - } - - return nil -} - -// ChangeableAttributes ... 
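Aside: the generated MarshalJSON/UnmarshalJSON pair above exists so the well-known org.opencontainers.image.* annotation keys map to typed fields while every other key round-trips through the AdditionalProperties catch-all. The pattern is easier to see reduced to a single known field; the Anno type in this sketch is hypothetical, not part of the SDK:

package main

import (
	"encoding/json"
	"fmt"
)

type Anno struct {
	AdditionalProperties map[string]interface{}
	Title                *string
}

func (a Anno) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if a.Title != nil {
		objectMap["org.opencontainers.image.title"] = a.Title
	}
	// Unknown keys round-trip through the catch-all map.
	for k, v := range a.AdditionalProperties {
		objectMap[k] = v
	}
	return json.Marshal(objectMap)
}

func (a *Anno) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	if err := json.Unmarshal(body, &m); err != nil {
		return err
	}
	for k, v := range m {
		if v == nil {
			continue
		}
		if k == "org.opencontainers.image.title" {
			var title string
			if err := json.Unmarshal(*v, &title); err != nil {
				return err
			}
			a.Title = &title
			continue
		}
		// Everything else lands in AdditionalProperties.
		var extra interface{}
		if err := json.Unmarshal(*v, &extra); err != nil {
			return err
		}
		if a.AdditionalProperties == nil {
			a.AdditionalProperties = make(map[string]interface{})
		}
		a.AdditionalProperties[k] = extra
	}
	return nil
}

func main() {
	var a Anno
	_ = json.Unmarshal([]byte(`{"org.opencontainers.image.title":"demo","x":1}`), &a)
	fmt.Println(*a.Title, a.AdditionalProperties["x"])
}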
-type ChangeableAttributes struct { - // DeleteEnabled - Delete enabled - DeleteEnabled *bool `json:"deleteEnabled,omitempty"` - // WriteEnabled - Write enabled - WriteEnabled *bool `json:"writeEnabled,omitempty"` - // ListEnabled - List enabled - ListEnabled *bool `json:"listEnabled,omitempty"` - // ReadEnabled - Read enabled - ReadEnabled *bool `json:"readEnabled,omitempty"` -} - -// DeletedRepository deleted repository -type DeletedRepository struct { - autorest.Response `json:"-"` - // ManifestsDeleted - SHA of the deleted image - ManifestsDeleted *[]string `json:"manifestsDeleted,omitempty"` - // TagsDeleted - Tag of the deleted image - TagsDeleted *[]string `json:"tagsDeleted,omitempty"` -} - -// Descriptor docker V2 image layer descriptor including config and layers -type Descriptor struct { - // MediaType - Layer media type - MediaType *string `json:"mediaType,omitempty"` - // Size - Layer size - Size *int64 `json:"size,omitempty"` - // Digest - Layer digest - Digest *string `json:"digest,omitempty"` - // Urls - Specifies a list of URIs from which this object may be downloaded. - Urls *[]string `json:"urls,omitempty"` - Annotations *Annotations `json:"annotations,omitempty"` -} - -// FsLayer image layer information -type FsLayer struct { - // BlobSum - SHA of an image layer - BlobSum *string `json:"blobSum,omitempty"` -} - -// History a list of unstructured historical data for v1 compatibility -type History struct { - // V1Compatibility - The raw v1 compatibility information - V1Compatibility *string `json:"v1Compatibility,omitempty"` -} - -// ImageSignature signature of a signed manifest -type ImageSignature struct { - // Header - A JSON web signature - Header *JWK `json:"header,omitempty"` - // Signature - A signature for the image manifest, signed by a libtrust private key - Signature *string `json:"signature,omitempty"` - // Protected - The signed protected header - Protected *string `json:"protected,omitempty"` -} - -// JWK a JSON web signature -type JWK struct { - Jwk *JWKHeader `json:"jwk,omitempty"` - // Alg - The algorithm used to sign or encrypt the JWT - Alg *string `json:"alg,omitempty"` -} - -// JWKHeader JSON web key parameter -type JWKHeader struct { - // Crv - crv value - Crv *string `json:"crv,omitempty"` - // Kid - kid value - Kid *string `json:"kid,omitempty"` - // Kty - kty value - Kty *string `json:"kty,omitempty"` - // X - x value - X *string `json:"x,omitempty"` - // Y - y value - Y *string `json:"y,omitempty"` -} - -// Manifest returns the requested manifest file -type Manifest struct { - // SchemaVersion - Schema version - SchemaVersion *int32 `json:"schemaVersion,omitempty"` -} - -// ManifestAttributes manifest attributes details -type ManifestAttributes struct { - autorest.Response `json:"-"` - // Registry - Registry name - Registry *string `json:"registry,omitempty"` - // ImageName - Image name - ImageName *string `json:"imageName,omitempty"` - // Attributes - Manifest attributes - Attributes *ManifestAttributesBase `json:"manifest,omitempty"` -} - -// ManifestAttributesBase manifest details -type ManifestAttributesBase struct { - // Digest - Manifest - Digest *string `json:"digest,omitempty"` - // ImageSize - Image size - ImageSize *int64 `json:"imageSize,omitempty"` - // CreatedTime - Created time - CreatedTime *string `json:"createdTime,omitempty"` - // LastUpdateTime - Last update time - LastUpdateTime *string `json:"lastUpdateTime,omitempty"` - // Architecture - CPU architecture - Architecture *string `json:"architecture,omitempty"` - // Os - 
Operating system - Os *string `json:"os,omitempty"` - // MediaType - Media type - MediaType *string `json:"mediaType,omitempty"` - // ConfigMediaType - Config blob media type - ConfigMediaType *string `json:"configMediaType,omitempty"` - // Tags - List of tags - Tags *[]string `json:"tags,omitempty"` - // ChangeableAttributes - Changeable attributes - ChangeableAttributes *ChangeableAttributes `json:"changeableAttributes,omitempty"` -} - -// ManifestAttributesManifest list of manifest attributes -type ManifestAttributesManifest struct { - // References - List of manifest attributes details - References *[]ManifestAttributesManifestReferences `json:"references,omitempty"` - // QuarantineTag - Quarantine tag name - QuarantineTag *string `json:"quarantineTag,omitempty"` -} - -// ManifestAttributesManifestReferences manifest attributes details -type ManifestAttributesManifestReferences struct { - // Digest - Manifest digest - Digest *string `json:"digest,omitempty"` - // Architecture - CPU architecture - Architecture *string `json:"architecture,omitempty"` - // Os - Operating system - Os *string `json:"os,omitempty"` -} - -// ManifestChangeableAttributes changeable attributes -type ManifestChangeableAttributes struct { - // DeleteEnabled - Delete enabled - DeleteEnabled *bool `json:"deleteEnabled,omitempty"` - // WriteEnabled - Write enabled - WriteEnabled *bool `json:"writeEnabled,omitempty"` - // ListEnabled - List enabled - ListEnabled *bool `json:"listEnabled,omitempty"` - // ReadEnabled - Read enabled - ReadEnabled *bool `json:"readEnabled,omitempty"` - // QuarantineState - Quarantine state - QuarantineState *string `json:"quarantineState,omitempty"` - // QuarantineDetails - Quarantine details - QuarantineDetails *string `json:"quarantineDetails,omitempty"` -} - -// ManifestList returns the requested Docker multi-arch-manifest file -type ManifestList struct { - // MediaType - Media type for this Manifest - MediaType *string `json:"mediaType,omitempty"` - // Manifests - List of V2 image layer information - Manifests *[]ManifestListAttributes `json:"manifests,omitempty"` - // SchemaVersion - Schema version - SchemaVersion *int32 `json:"schemaVersion,omitempty"` -} - -// ManifestListAttributes ... -type ManifestListAttributes struct { - // MediaType - The MIME type of the referenced object. 
This will generally be application/vnd.docker.image.manifest.v2+json, but it could also be application/vnd.docker.image.manifest.v1+json - MediaType *string `json:"mediaType,omitempty"` - // Size - The size in bytes of the object - Size *int64 `json:"size,omitempty"` - // Digest - The digest of the content, as defined by the Registry V2 HTTP API Specification - Digest *string `json:"digest,omitempty"` - Platform *Platform `json:"platform,omitempty"` -} - -// ManifestWrapper returns the requested manifest file -type ManifestWrapper struct { - autorest.Response `json:"-"` - // MediaType - Media type for this Manifest - MediaType *string `json:"mediaType,omitempty"` - // Manifests - (ManifestList, OCIIndex) List of V2 image layer information - Manifests *[]ManifestListAttributes `json:"manifests,omitempty"` - // Config - (V2, OCI) Image config descriptor - Config *Descriptor `json:"config,omitempty"` - // Layers - (V2, OCI) List of V2 image layer information - Layers *[]Descriptor `json:"layers,omitempty"` - // Annotations - (OCI, OCIIndex) Additional metadata - Annotations *Annotations `json:"annotations,omitempty"` - // Architecture - (V1) CPU architecture - Architecture *string `json:"architecture,omitempty"` - // Name - (V1) Image name - Name *string `json:"name,omitempty"` - // Tag - (V1) Image tag - Tag *string `json:"tag,omitempty"` - // FsLayers - (V1) List of layer information - FsLayers *[]FsLayer `json:"fsLayers,omitempty"` - // History - (V1) Image history - History *[]History `json:"history,omitempty"` - // Signatures - (V1) Image signature - Signatures *[]ImageSignature `json:"signatures,omitempty"` - // SchemaVersion - Schema version - SchemaVersion *int32 `json:"schemaVersion,omitempty"` -} - -// OCIIndex returns the requested OCI index file -type OCIIndex struct { - // Manifests - List of OCI image layer information - Manifests *[]ManifestListAttributes `json:"manifests,omitempty"` - Annotations *Annotations `json:"annotations,omitempty"` - // SchemaVersion - Schema version - SchemaVersion *int32 `json:"schemaVersion,omitempty"` -} - -// OCIManifest returns the requested OCI Manifest file -type OCIManifest struct { - // Config - V2 image config descriptor - Config *Descriptor `json:"config,omitempty"` - // Layers - List of V2 image layer information - Layers *[]Descriptor `json:"layers,omitempty"` - Annotations *Annotations `json:"annotations,omitempty"` - // SchemaVersion - Schema version - SchemaVersion *int32 `json:"schemaVersion,omitempty"` -} - -// Platform the platform object describes the platform which the image in the manifest runs on. A full list -// of valid operating system and architecture values are listed in the Go language documentation for $GOOS -// and $GOARCH -type Platform struct { - // Architecture - Specifies the CPU architecture, for example amd64 or ppc64le. - Architecture *string `json:"architecture,omitempty"` - // Os - The os field specifies the operating system, for example linux or windows. - Os *string `json:"os,omitempty"` - // OsVersion - The optional os.version field specifies the operating system version, for example 10.0.10586. - OsVersion *string `json:"os.version,omitempty"` - // OsFeatures - The optional os.features field specifies an array of strings, each listing a required OS feature (for example on Windows win32k - OsFeatures *[]string `json:"os.features,omitempty"` - // Variant - The optional variant field specifies a variant of the CPU, for example armv6l to specify a particular CPU variant of the ARM CPU. 
- Variant *string `json:"variant,omitempty"` - // Features - The optional features field specifies an array of strings, each listing a required CPU feature (for example sse4 or aes - Features *[]string `json:"features,omitempty"` -} - -// ReadCloser ... -type ReadCloser struct { - autorest.Response `json:"-"` - Value *io.ReadCloser `json:"value,omitempty"` -} - -// RefreshToken ... -type RefreshToken struct { - autorest.Response `json:"-"` - // RefreshToken - The refresh token to be used for generating access tokens - RefreshToken *string `json:"refresh_token,omitempty"` -} - -// Repositories list of repositories -type Repositories struct { - autorest.Response `json:"-"` - // Names - Repository names - Names *[]string `json:"repositories,omitempty"` -} - -// RepositoryAttributes repository attributes -type RepositoryAttributes struct { - autorest.Response `json:"-"` - // Registry - Registry name - Registry *string `json:"registry,omitempty"` - // ImageName - Image name - ImageName *string `json:"imageName,omitempty"` - // CreatedTime - Image created time - CreatedTime *string `json:"createdTime,omitempty"` - // LastUpdateTime - Image last update time - LastUpdateTime *string `json:"lastUpdateTime,omitempty"` - // ManifestCount - Number of the manifests - ManifestCount *int32 `json:"manifestCount,omitempty"` - // TagCount - Number of the tags - TagCount *int32 `json:"tagCount,omitempty"` - // ChangeableAttributes - Changeable attributes - ChangeableAttributes *ChangeableAttributes `json:"changeableAttributes,omitempty"` -} - -// RepositoryTags result of the request to list tags of the image -type RepositoryTags struct { - // Name - Name of the image - Name *string `json:"name,omitempty"` - // Tags - List of tags - Tags *[]string `json:"tags,omitempty"` -} - -// SetObject ... 
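Aside: the ManifestListAttributes and Platform models above are what multi-arch clients match against. Each manifest-list entry names an os/architecture pair drawn from the same vocabulary as $GOOS/$GOARCH, and a puller selects the entry that matches its own runtime. A pared-down sketch of that selection; the types here are simplified stand-ins, not the SDK's:

package main

import (
	"fmt"
	"runtime"
)

type platform struct{ Architecture, OS string }

type listEntry struct {
	Digest   string
	Platform platform
}

func pick(entries []listEntry) (string, bool) {
	for _, e := range entries {
		// Platform values use the same vocabulary as $GOOS/$GOARCH.
		if e.Platform.OS == runtime.GOOS && e.Platform.Architecture == runtime.GOARCH {
			return e.Digest, true
		}
	}
	return "", false
}

func main() {
	entries := []listEntry{
		{Digest: "sha256:aaa", Platform: platform{"arm64", "linux"}},
		{Digest: "sha256:bbb", Platform: platform{"amd64", "linux"}},
	}
	if d, ok := pick(entries); ok {
		fmt.Println("matched", d)
	}
}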
-type SetObject struct { - autorest.Response `json:"-"` - Value interface{} `json:"value,omitempty"` -} - -// TagAttributes tag attributes -type TagAttributes struct { - autorest.Response `json:"-"` - // Registry - Registry name - Registry *string `json:"registry,omitempty"` - // ImageName - Image name - ImageName *string `json:"imageName,omitempty"` - // Attributes - List of tag attribute details - Attributes *TagAttributesBase `json:"tag,omitempty"` -} - -// TagAttributesBase tag attribute details -type TagAttributesBase struct { - // Name - Tag name - Name *string `json:"name,omitempty"` - // Digest - Tag digest - Digest *string `json:"digest,omitempty"` - // CreatedTime - Tag created time - CreatedTime *string `json:"createdTime,omitempty"` - // LastUpdateTime - Tag last update time - LastUpdateTime *string `json:"lastUpdateTime,omitempty"` - // Signed - Is signed - Signed *bool `json:"signed,omitempty"` - // ChangeableAttributes - Changeable attributes - ChangeableAttributes *ChangeableAttributes `json:"changeableAttributes,omitempty"` -} - -// TagAttributesTag tag -type TagAttributesTag struct { - // SignatureRecord - SignatureRecord value - SignatureRecord *string `json:"signatureRecord,omitempty"` -} - -// TagList list of tag details -type TagList struct { - autorest.Response `json:"-"` - // Registry - Registry name - Registry *string `json:"registry,omitempty"` - // ImageName - Image name - ImageName *string `json:"imageName,omitempty"` - // Tags - List of tag attribute details - Tags *[]TagAttributesBase `json:"tags,omitempty"` -} - -// V1Manifest returns the requested V1 manifest file -type V1Manifest struct { - // Architecture - CPU architecture - Architecture *string `json:"architecture,omitempty"` - // Name - Image name - Name *string `json:"name,omitempty"` - // Tag - Image tag - Tag *string `json:"tag,omitempty"` - // FsLayers - List of layer information - FsLayers *[]FsLayer `json:"fsLayers,omitempty"` - // History - Image history - History *[]History `json:"history,omitempty"` - // Signatures - Image signature - Signatures *[]ImageSignature `json:"signatures,omitempty"` - // SchemaVersion - Schema version - SchemaVersion *int32 `json:"schemaVersion,omitempty"` -} - -// V2Manifest returns the requested Docker V2 Manifest file -type V2Manifest struct { - // MediaType - Media type for this Manifest - MediaType *string `json:"mediaType,omitempty"` - // Config - V2 image config descriptor - Config *Descriptor `json:"config,omitempty"` - // Layers - List of V2 image layer information - Layers *[]Descriptor `json:"layers,omitempty"` - // SchemaVersion - Schema version - SchemaVersion *int32 `json:"schemaVersion,omitempty"` -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/refreshtokens.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/refreshtokens.go deleted file mode 100644 index a3f2ef220f..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/refreshtokens.go +++ /dev/null @@ -1,111 +0,0 @@ -package containerregistry - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. 
-// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// RefreshTokensClient is the metadata API definition for the Azure Container Registry runtime -type RefreshTokensClient struct { - BaseClient -} - -// NewRefreshTokensClient creates an instance of the RefreshTokensClient client. -func NewRefreshTokensClient(loginURI string) RefreshTokensClient { - return RefreshTokensClient{New(loginURI)} -} - -// GetFromExchange exchange AAD tokens for an ACR refresh Token -// Parameters: -// grantType - can take a value of access_token_refresh_token, or access_token, or refresh_token -// service - indicates the name of your Azure container registry. -// tenant - AAD tenant associated to the AAD credentials. -// refreshToken - AAD refresh token, mandatory when grant_type is access_token_refresh_token or refresh_token -// accessToken - AAD access token, mandatory when grant_type is access_token_refresh_token or access_token. -func (client RefreshTokensClient) GetFromExchange(ctx context.Context, grantType string, service string, tenant string, refreshToken string, accessToken string) (result RefreshToken, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RefreshTokensClient.GetFromExchange") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetFromExchangePreparer(ctx, grantType, service, tenant, refreshToken, accessToken) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RefreshTokensClient", "GetFromExchange", nil, "Failure preparing request") - return - } - - resp, err := client.GetFromExchangeSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.RefreshTokensClient", "GetFromExchange", resp, "Failure sending request") - return - } - - result, err = client.GetFromExchangeResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RefreshTokensClient", "GetFromExchange", resp, "Failure responding to request") - return - } - - return -} - -// GetFromExchangePreparer prepares the GetFromExchange request. -func (client RefreshTokensClient) GetFromExchangePreparer(ctx context.Context, grantType string, service string, tenant string, refreshToken string, accessToken string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - formDataParameters := map[string]interface{}{ - "grant_type": grantType, - "service": service, - } - if len(tenant) > 0 { - formDataParameters["tenant"] = tenant - } - if len(refreshToken) > 0 { - formDataParameters["refresh_token"] = refreshToken - } - if len(accessToken) > 0 { - formDataParameters["access_token"] = accessToken - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPath("/oauth2/exchange"), - autorest.WithFormData(autorest.MapToValues(formDataParameters))) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetFromExchangeSender sends the GetFromExchange request. The method will close the -// http.Response Body if it receives an error. 
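Aside: GetFromExchange above wraps ACR's token service. A form-encoded POST to /oauth2/exchange trades an AAD token for an ACR refresh token, with grant_type selecting which of the token parameters is mandatory. Roughly, without autorest, assuming loginURI and the token values as placeholders:

package sketch

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func exchange(loginURI, service, tenant, aadAccessToken string) (string, error) {
	// grant_type "access_token" makes the access_token field mandatory,
	// per the deleted doc comments.
	form := url.Values{}
	form.Set("grant_type", "access_token")
	form.Set("service", service)
	if tenant != "" {
		form.Set("tenant", tenant)
	}
	form.Set("access_token", aadAccessToken)

	resp, err := http.Post(loginURI+"/oauth2/exchange",
		"application/x-www-form-urlencoded", strings.NewReader(form.Encode()))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("exchange: unexpected status %d", resp.StatusCode)
	}
	var out struct {
		RefreshToken string `json:"refresh_token"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return "", err
	}
	return out.RefreshToken, nil
}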
-func (client RefreshTokensClient) GetFromExchangeSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetFromExchangeResponder handles the response to the GetFromExchange request. The method always -// closes the http.Response Body. -func (client RefreshTokensClient) GetFromExchangeResponder(resp *http.Response) (result RefreshToken, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/repository.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/repository.go deleted file mode 100644 index 7b7c4e3fd5..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/repository.go +++ /dev/null @@ -1,321 +0,0 @@ -package containerregistry - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// RepositoryClient is the metadata API definition for the Azure Container Registry runtime -type RepositoryClient struct { - BaseClient -} - -// NewRepositoryClient creates an instance of the RepositoryClient client. -func NewRepositoryClient(loginURI string) RepositoryClient { - return RepositoryClient{New(loginURI)} -} - -// Delete delete the repository identified by `name` -// Parameters: -// name - name of the image (including the namespace) -func (client RepositoryClient) Delete(ctx context.Context, name string) (result DeletedRepository, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RepositoryClient.Delete") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, name) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client RepositoryClient) DeletePreparer(ctx context.Context, name string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/acr/v1/{name}", pathParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client RepositoryClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client RepositoryClient) DeleteResponder(resp *http.Response) (result DeletedRepository, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetAttributes get repository attributes -// Parameters: -// name - name of the image (including the namespace) -func (client RepositoryClient) GetAttributes(ctx context.Context, name string) (result RepositoryAttributes, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RepositoryClient.GetAttributes") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetAttributesPreparer(ctx, name) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "GetAttributes", nil, "Failure preparing request") - return - } - - resp, err := client.GetAttributesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "GetAttributes", resp, "Failure sending request") - return - } - - result, err = client.GetAttributesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "GetAttributes", resp, "Failure responding to request") - return - } - - return -} - -// GetAttributesPreparer prepares the GetAttributes request. -func (client RepositoryClient) GetAttributesPreparer(ctx context.Context, name string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/acr/v1/{name}", pathParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetAttributesSender sends the GetAttributes request. The method will close the -// http.Response Body if it receives an error. 
-func (client RepositoryClient) GetAttributesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetAttributesResponder handles the response to the GetAttributes request. The method always -// closes the http.Response Body. -func (client RepositoryClient) GetAttributesResponder(resp *http.Response) (result RepositoryAttributes, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetList list repositories -// Parameters: -// last - query parameter for the last item in previous query. Result set will include values lexically after -// last. -// n - query parameter for max number of items -func (client RepositoryClient) GetList(ctx context.Context, last string, n *int32) (result Repositories, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RepositoryClient.GetList") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetListPreparer(ctx, last, n) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "GetList", nil, "Failure preparing request") - return - } - - resp, err := client.GetListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "GetList", resp, "Failure sending request") - return - } - - result, err = client.GetListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "GetList", resp, "Failure responding to request") - return - } - - return -} - -// GetListPreparer prepares the GetList request. -func (client RepositoryClient) GetListPreparer(ctx context.Context, last string, n *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - queryParameters := map[string]interface{}{} - if len(last) > 0 { - queryParameters["last"] = autorest.Encode("query", last) - } - if n != nil { - queryParameters["n"] = autorest.Encode("query", *n) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPath("/acr/v1/_catalog"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetListSender sends the GetList request. The method will close the -// http.Response Body if it receives an error. -func (client RepositoryClient) GetListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetListResponder handles the response to the GetList request. The method always -// closes the http.Response Body. 
-func (client RepositoryClient) GetListResponder(resp *http.Response) (result Repositories, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateAttributes update the attribute identified by `name` where `reference` is the name of the repository. -// Parameters: -// name - name of the image (including the namespace) -// value - repository attribute value -func (client RepositoryClient) UpdateAttributes(ctx context.Context, name string, value *ChangeableAttributes) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RepositoryClient.UpdateAttributes") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateAttributesPreparer(ctx, name, value) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "UpdateAttributes", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateAttributesSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "UpdateAttributes", resp, "Failure sending request") - return - } - - result, err = client.UpdateAttributesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "UpdateAttributes", resp, "Failure responding to request") - return - } - - return -} - -// UpdateAttributesPreparer prepares the UpdateAttributes request. -func (client RepositoryClient) UpdateAttributesPreparer(ctx context.Context, name string, value *ChangeableAttributes) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/acr/v1/{name}", pathParameters)) - if value != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(value)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateAttributesSender sends the UpdateAttributes request. The method will close the -// http.Response Body if it receives an error. -func (client RepositoryClient) UpdateAttributesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateAttributesResponder handles the response to the UpdateAttributes request. The method always -// closes the http.Response Body. 
-func (client RepositoryClient) UpdateAttributesResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/tag.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/tag.go deleted file mode 100644 index f53d4f36e5..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/tag.go +++ /dev/null @@ -1,339 +0,0 @@ -package containerregistry - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// TagClient is the metadata API definition for the Azure Container Registry runtime -type TagClient struct { - BaseClient -} - -// NewTagClient creates an instance of the TagClient client. -func NewTagClient(loginURI string) TagClient { - return TagClient{New(loginURI)} -} - -// Delete delete tag -// Parameters: -// name - name of the image (including the namespace) -// reference - tag name -func (client TagClient) Delete(ctx context.Context, name string, reference string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TagClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, name, reference) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client TagClient) DeletePreparer(ctx context.Context, name string, reference string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "reference": autorest.Encode("path", reference), - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/acr/v1/{name}/_tags/{reference}", pathParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. 
-func (client TagClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client TagClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByClosing()) - result.Response = resp - return -} - -// GetAttributes get tag attributes by tag -// Parameters: -// name - name of the image (including the namespace) -// reference - tag name -func (client TagClient) GetAttributes(ctx context.Context, name string, reference string) (result TagAttributes, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TagClient.GetAttributes") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetAttributesPreparer(ctx, name, reference) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "GetAttributes", nil, "Failure preparing request") - return - } - - resp, err := client.GetAttributesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "GetAttributes", resp, "Failure sending request") - return - } - - result, err = client.GetAttributesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "GetAttributes", resp, "Failure responding to request") - return - } - - return -} - -// GetAttributesPreparer prepares the GetAttributes request. -func (client TagClient) GetAttributesPreparer(ctx context.Context, name string, reference string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "reference": autorest.Encode("path", reference), - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/acr/v1/{name}/_tags/{reference}", pathParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetAttributesSender sends the GetAttributes request. The method will close the -// http.Response Body if it receives an error. -func (client TagClient) GetAttributesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetAttributesResponder handles the response to the GetAttributes request. The method always -// closes the http.Response Body. -func (client TagClient) GetAttributesResponder(resp *http.Response) (result TagAttributes, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetList list tags of a repository -// Parameters: -// name - name of the image (including the namespace) -// last - query parameter for the last item in previous query. Result set will include values lexically after -// last. 
-// n - query parameter for max number of items -// orderby - orderby query parameter -// digest - filter by digest -func (client TagClient) GetList(ctx context.Context, name string, last string, n *int32, orderby string, digest string) (result TagList, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TagClient.GetList") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetListPreparer(ctx, name, last, n, orderby, digest) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "GetList", nil, "Failure preparing request") - return - } - - resp, err := client.GetListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "GetList", resp, "Failure sending request") - return - } - - result, err = client.GetListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "GetList", resp, "Failure responding to request") - return - } - - return -} - -// GetListPreparer prepares the GetList request. -func (client TagClient) GetListPreparer(ctx context.Context, name string, last string, n *int32, orderby string, digest string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - } - - queryParameters := map[string]interface{}{} - if len(last) > 0 { - queryParameters["last"] = autorest.Encode("query", last) - } - if n != nil { - queryParameters["n"] = autorest.Encode("query", *n) - } - if len(orderby) > 0 { - queryParameters["orderby"] = autorest.Encode("query", orderby) - } - if len(digest) > 0 { - queryParameters["digest"] = autorest.Encode("query", digest) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/acr/v1/{name}/_tags", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetListSender sends the GetList request. The method will close the -// http.Response Body if it receives an error. -func (client TagClient) GetListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetListResponder handles the response to the GetList request. The method always -// closes the http.Response Body. 
-func (client TagClient) GetListResponder(resp *http.Response) (result TagList, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateAttributes update tag attributes -// Parameters: -// name - name of the image (including the namespace) -// reference - tag name -// value - repository attribute value -func (client TagClient) UpdateAttributes(ctx context.Context, name string, reference string, value *ChangeableAttributes) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TagClient.UpdateAttributes") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateAttributesPreparer(ctx, name, reference, value) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "UpdateAttributes", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateAttributesSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "UpdateAttributes", resp, "Failure sending request") - return - } - - result, err = client.UpdateAttributesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "UpdateAttributes", resp, "Failure responding to request") - return - } - - return -} - -// UpdateAttributesPreparer prepares the UpdateAttributes request. -func (client TagClient) UpdateAttributesPreparer(ctx context.Context, name string, reference string, value *ChangeableAttributes) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "reference": autorest.Encode("path", reference), - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPathParameters("/acr/v1/{name}/_tags/{reference}", pathParameters)) - if value != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(value)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateAttributesSender sends the UpdateAttributes request. The method will close the -// http.Response Body if it receives an error. -func (client TagClient) UpdateAttributesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateAttributesResponder handles the response to the UpdateAttributes request. The method always -// closes the http.Response Body. 
-func (client TagClient) UpdateAttributesResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/v2support.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/v2support.go deleted file mode 100644 index 58fca35cbb..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/v2support.go +++ /dev/null @@ -1,89 +0,0 @@ -package containerregistry - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// V2SupportClient is the metadata API definition for the Azure Container Registry runtime -type V2SupportClient struct { - BaseClient -} - -// NewV2SupportClient creates an instance of the V2SupportClient client. -func NewV2SupportClient(loginURI string) V2SupportClient { - return V2SupportClient{New(loginURI)} -} - -// Check tells whether this Docker Registry instance supports Docker Registry HTTP API v2 -func (client V2SupportClient) Check(ctx context.Context) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/V2SupportClient.Check") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.CheckPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.V2SupportClient", "Check", nil, "Failure preparing request") - return - } - - resp, err := client.CheckSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.V2SupportClient", "Check", resp, "Failure sending request") - return - } - - result, err = client.CheckResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.V2SupportClient", "Check", resp, "Failure responding to request") - return - } - - return -} - -// CheckPreparer prepares the Check request. -func (client V2SupportClient) CheckPreparer(ctx context.Context) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "url": client.LoginURI, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{url}", urlParameters), - autorest.WithPath("/v2/")) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CheckSender sends the Check request. The method will close the -// http.Response Body if it receives an error. -func (client V2SupportClient) CheckSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CheckResponder handles the response to the Check request. 
The method always -// closes the http.Response Body. -func (client V2SupportClient) CheckResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/version.go deleted file mode 100644 index 1298b04717..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package containerregistry - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " containerregistry/2019-08-15-preview" -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return version.Number -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go deleted file mode 100644 index bcfbb15cce..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ /dev/null @@ -1,7 +0,0 @@ -package version - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -// Number contains the semantic version of this SDK. -const Number = "v68.0.0" diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore deleted file mode 100644 index 3350aaf706..0000000000 --- a/vendor/github.com/Azure/go-autorest/.gitignore +++ /dev/null @@ -1,32 +0,0 @@ -# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore) -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -.DS_Store -.idea/ -.vscode/ - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# go-autorest specific -vendor/ -autorest/azure/example/example diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md deleted file mode 100644 index d1f596bfc9..0000000000 --- a/vendor/github.com/Azure/go-autorest/CHANGELOG.md +++ /dev/null @@ -1,1004 +0,0 @@ -# CHANGELOG - -## v14.2.0 - -- Added package comment to make `github.com/Azure/go-autorest` importable. - -## v14.1.1 - -### Bug Fixes - -- Change `x-ms-authorization-auxiliary` header value separator to comma. - -## v14.1.0 - -### New Features - -- Added `azure.SetEnvironment()` that will update the global environments map with the specified values. 
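The v14.1.0 entry above rounds out environment handling: `azure.SetEnvironment()` adds a custom cloud to the package's global environments map so it can later be looked up by name. A minimal sketch, assuming the signature `SetEnvironment(name string, env Environment)`; the names and endpoints are placeholders, not real clouds:

```go
package main

import "github.com/Azure/go-autorest/autorest/azure"

func main() {
	// Hypothetical private-cloud definition with placeholder endpoints.
	stack := azure.Environment{
		Name:                    "MYSTACK",
		ResourceManagerEndpoint: "https://management.mystack.example/",
		ActiveDirectoryEndpoint: "https://login.mystack.example/",
	}
	// Register it alongside the built-in public and sovereign clouds so
	// later lookups by name can resolve it.
	azure.SetEnvironment("MYSTACK", stack)
}
```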
- -## v14.0.1 - -### Bug Fixes - -- Fix race condition when refreshing token. -- Fixed some tests to work with Go 1.14. - -## v14.0.0 - -### Breaking Changes - -- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`. - -### New Features - -- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero which means there is no cap. - -## v13.4.0 - -### New Features - -- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client. -- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators. - -## v13.3.3 - -### Bug Fixes - -- Fixed connection leak when retrying requests. -- Enabled exponential back-off with a 2-minute cap when retrying on 429. -- Fixed some cases where errors were inadvertently dropped. - -## v13.3.2 - -### Bug Fixes - -- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation. - -## v13.3.1 - -- Updated external dependencies. - -## v13.3.0 - -### New Features - -- Added support for shared key and shared access signature token authorization. - - `autorest.NewSharedKeyAuthorizer()` and dependent types. - - `autorest.NewSASTokenAuthorizer()` and dependent types. -- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired. - -### Bug Fixes - -- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set. -- Support parsing error messages in XML responses. - -## v13.2.0 - -### New Features - -- Added the following functions to replace their versions that don't take a context. - - `adal.InitiateDeviceAuthWithContext()` - - `adal.CheckForUserCompletionWithContext()` - - `adal.WaitForUserCompletionWithContext()` - -## v13.1.0 - -### New Features - -- Added support for MSI authentication on Azure App Service and Azure Functions. - -## v13.0.2 - -### Bug Fixes - -- Always retry a request even if the sender returns a non-nil error. - -## v13.0.1 - -### Bug Fixes - -- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters. - -## v13.0.0 - -### Breaking Changes - -The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice. -What this means is that by default no tracing provider will be compiled into your program and setting the `AZURE_SDK_TRACING_ENABLED` -environment variable will have no effect. To enable this previous behavior you must now add the following import to your source file. -```go - import _ "github.com/Azure/go-autorest/tracing/opencensus" -``` -The APIs required by autorest-generated code have remained but some APIs have been removed and new ones added. -The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package). -- tracing.Transport -- tracing.Enable() -- tracing.EnableWithAIForwarding() -- tracing.Disable() - -The following APIs and types have been added -- tracing.Tracer -- tracing.Register() - -To hook up a tracer simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface.
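The v13.0.0 entry above is the one that matters most for this diff: after the tracing rewrite, callers opt in by implementing `tracing.Tracer` and registering it. A minimal sketch, assuming the interface carries the three methods named above plus `NewTransport`; `noopTracer` is a hypothetical stand-in for a real provider such as the `opencensus` package:

```go
package main

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/tracing"
)

// noopTracer satisfies tracing.Tracer without recording anything.
type noopTracer struct{}

func (noopTracer) NewTransport(base *http.Transport) http.RoundTripper        { return base }
func (noopTracer) StartSpan(ctx context.Context, name string) context.Context { return ctx }
func (noopTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {}

func main() {
	// Once registered, generated clients (like the ACR clients deleted in
	// this diff) route their StartSpan/EndSpan calls through this tracer.
	tracing.Register(noopTracer{})
}
```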
- -## v12.4.3 - -### Bug Fixes - -- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens. - -## v12.4.2 - -### Bug Fixes - -- Improvements to the fixes made in v12.4.1. - - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency. - - Switched to latest version of `ocagent` that still depends on protobuf v1.2. - - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum. - -## v12.4.1 - -### Bug Fixes - -- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking kubernetes. -- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent. - -## v12.4.0 - -### New Features - -- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context. - -## v12.3.0 - -### New Features - -- Support for multi-tenant via x-ms-authorization-auxiliary header has been added for client credentials with - secret scenario; this basically bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding - MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the request. - The authentication helpers have been updated to support this scenario; if environment var AZURE_AUXILIARY_TENANT_IDS - is set with a semicolon delimited list of tenants the multi-tenant codepath will kick in to create the appropriate authorizer. - See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken` and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer` - along with their supporting types and methods. -- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context. -- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries. - -## v12.2.0 - -### New Features - -- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators. -- Added `autorest.ByUnmarshallingBytes` response decorator. -- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting HTTP status code in `autorest.Response` types. - -### Bug Fixes - -- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes. - -## v12.1.0 - -### New Features - -- Added `to.ByteSlicePtr()`. -- Added blob/queue storage resource ID to `azure.ResourceIdentifier`. - -## v12.0.0 - -### Breaking Changes - -In preparation for modules the following deprecated content has been removed. - - - async.NewFuture() - - async.Future.Done() - - async.Future.WaitForCompletion() - - async.DoPollForAsynchronous() - - The `utils` package - - validation.NewErrorWithValidationError() - - The `version` package - -## v11.9.0 - -### New Features - -- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds. - -## v11.8.0 - -### New Features - -- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation. - -## v11.7.1 - -### Bug Fixes - -- Fix missing support for http(s) proxy when using the default sender.
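Among the v12.2.0 additions above, `Response.HasHTTPStatus` replaces the hand-rolled status comparisons that the generated responders in this diff perform. A hedged sketch, assuming the variadic form `HasHTTPStatus(statusCodes ...int) bool`:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Wrap a completed *http.Response the way the generated responders do.
	resp := autorest.Response{Response: &http.Response{StatusCode: http.StatusAccepted}}
	// One call instead of comparing resp.Response.StatusCode against each code.
	fmt.Println(resp.HasHTTPStatus(http.StatusOK, http.StatusAccepted)) // true
}
```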
- -## v11.7.0 - -### New Features - -- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package. - -## v11.6.1 - -### Bug Fixes - -- Fix ACR DNS endpoint for government clouds. -- Add Cosmos DB DNS endpoints. -- Update dependencies to resolve build breaks in OpenCensus. - -## v11.6.0 - -### New Features - -- Added type `autorest.BasicAuthorizer` to support Basic authentication. - -## v11.5.2 - -### Bug Fixes - -- Fixed `GetTokenFromCLI` not working with zsh. - -## v11.5.1 - -### Bug Fixes - -- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2. - -## v11.5.0 - -### New Features - -- The `auth` package has been refactored so that the environment and file settings are now available. -- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created. -- Added support for certificate authorization for file-based config. - -## v11.4.0 - -### New Features - -- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests. -- Exported `adal.UserAgent()` for parity with `autorest.Client`. - -## v11.3.2 - -### Bug Fixes - -- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline. - -## v11.3.1 - -### Bug Fixes - -- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases. - -## v11.3.0 - -### New Features - -- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type. - -## v11.2.8 - -### Bug Fixes - -- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package. - -## v11.2.7 - -### Bug Fixes - -- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`. - Note that for backward compatibility reasons, both will work until the next major version release of the package. - -## v11.2.6 - -### Bug Fixes - -- If zero bytes are read from a polling response body don't attempt to unmarshal them. - -## v11.2.5 - -### Bug Fixes - -- Removed race condition in `autorest.DoRetryForStatusCodes`. - -## v11.2.4 - -### Bug Fixes - -- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available. - -## v11.2.1 - -NOTE: Versions of Go prior to 1.10 have been removed from CI as they no -longer work with golint. - -### Bug Fixes - -- Method `MSIConfig.Authorizer` now supports user-assigned identities. -- The adal package now reports its own user-agent string. - -## v11.2.0 - -### New Features - -- Added `tracing` package that enables instrumentation of HTTP and API calls. - Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable` - will start instrumenting the code for metrics and traces. - Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or - calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an - App Insights Local Forwarder that needs to be running. Note that if the - AI Local Forwarder is not running tracing will still be enabled. - By default, instrumentation is disabled. Once enabled, instrumentation can also - be programmatically disabled by calling `Disable`. -- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated. - -### Bug Fixes - -- Don't use the initial request's context for LRO polling.
-- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if - it is already set. - -## v11.1.1 - -### Bug Fixes - -- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller. - -## v11.1.0 - -### New Features - -- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI. -- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version. - -## v11.0.1 - -### New Features - -- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication. - -## v11.0.0 - -### Breaking Changes - -- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number` - - ExpiresIn - - ExpiresOn - - NotBefore - -### New Features - -- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource. -- Setting a client's `PollingDuration` to zero will use the provided context to control an LRO's polling duration. - -## v10.15.5 - -### Bug Fixes - -- In `DoRetryForStatusCodes`, if a request's context is cancelled return the last response. - -## v10.15.4 - -### Bug Fixes - -- If a polling operation returns a failure status code return the associated error. - -## v10.15.3 - -### Bug Fixes - -- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header. - -## v10.15.2 - -### Bug Fixes - -- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers. - -## v10.15.1 - -### Bug Fixes - -- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll. -- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure. - -## v10.15.0 - -### New Features - -- Add initial support for request/response logging via setting environment variables. - Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response - without their bodies. To include the bodies set the log level to `LogDebug`. - By default the logger writes to stderr, however it can also write to stdout or a file - if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file - already exists it will be truncated. - IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key - headers. Any other secrets will _not_ be redacted. - -## v10.14.0 - -### New Features - -- Added package version that contains version constants and user-agent data. - -### Bug Fixes - -- Add the user-agent to token requests. - -## v10.13.0 - -- Added support for additionalInfo in ServiceError type. - -## v10.12.0 - -### New Features - -- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximum number of attempts to refresh an MSI token. - -## v10.11.4 - -### Bug Fixes - -- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult(). -- If there is no "final GET URL" return an error from Future.GetResult(). - -## v10.11.3 - -### Bug Fixes - -- In IMDS retry logic, if we don't receive a response don't retry. - - Renamed the retry function so it's clear it's meant for IMDS only.
-- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost. - - Also add the raw HTTP response to the DetailedResponse. -- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration(). - -## v10.11.2 - -### Bug Fixes - -- Validation for integers handles int and int64 types. - -## v10.11.1 - -### Bug Fixes - -- Adding User information to authorization config as parsed from CLI cache. - -## v10.11.0 - -### New Features - -- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret -- Added method ServicePrincipalToken.MarshalTokenJSON() to marshal the inner Token - -## v10.10.0 - -### New Features - -- Most ServicePrincipalTokens can now be marshalled/unmarshalled to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported). -- Added method ServicePrincipalToken.SetRefreshCallbacks(). - -## v10.9.2 - -### Bug Fixes - -- Refreshing a refresh token obtained from a web app authorization code now works. - -## v10.9.1 - -### Bug Fixes - -- The retry logic for MSI token requests now uses exponential backoff per the guidelines. -- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface. - -## v10.9.0 - -### Deprecated Methods - -| Old Method | New Method | -| -------------------------: | :---------------------------: | -| azure.NewFuture() | azure.NewFutureFromResponse() | -| Future.WaitForCompletion() | Future.WaitForCompletionRef() | - -### New Features - -- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation. -- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation. - -### Bug Fixes - -- Some futures failed to return their results, this should now be fixed. - -## v10.8.2 - -### Bug Fixes - -- Add nil-guard to token retry logic. - -## v10.8.1 - -### Bug Fixes - -- Return a TokenRefreshError if the sender fails on the initial request. -- Don't retry on non-temporary network errors. - -## v10.8.0 - -- Added NewAuthorizerFromEnvironmentWithResource() helper function. - -## v10.7.0 - -### New Features - -- Added \*WithContext() methods to ADAL token refresh operations. - -## v10.6.2 - -- Fixed a bug on device authentication. - -## v10.6.1 - -- Added retries to MSI token get request. - -## v10.6.0 - -- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint. - -## v10.5.1 - -### Bug Fixes - -- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`. `-v` flag is required. - -## v10.5.0 - -### New Features - -- Added NewPollingRequestWithContext() for use with polling asynchronous operations. - -### Bug Fixes - -- Make retry logic use the request's context instead of the deprecated Cancel object. - -## v10.4.0 - -### New Features - -- Added helper for parsing Azure Resource IDs. -- Added deprecation message to utils.GetEnvVarOrExit() - -## v10.3.0 - -### New Features - -- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid cloud model, where one may define their own endpoints. -- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint. - -## v10.2.0 - -### New Features - -- Added endpoints for batch management.
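The v10.9.0 entry above deprecates the context-free polling helpers in favor of `NewFutureFromResponse` and `WaitForCompletionRef`. A hedged sketch of the resulting flow, assuming the signatures implied by the table; `pollLRO` and `initialResp` are illustrative names, not go-autorest APIs:

```go
package main

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// pollLRO wraps the initial async response, polls to completion under ctx,
// then issues the final GET for the operation's result.
func pollLRO(ctx context.Context, client autorest.Client, initialResp *http.Response) (*http.Response, error) {
	future, err := azure.NewFutureFromResponse(initialResp)
	if err != nil {
		return nil, err
	}
	if err := future.WaitForCompletionRef(ctx, client); err != nil {
		return nil, err
	}
	return future.GetResult(client)
}
```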
- -## v10.1.3 - -### Bug Fixes - -- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization(). -- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers. - -## v10.1.2 - -- Corrected comment for auth.NewAuthorizerFromFile() function. - -## v10.1.1 - -- Updated version number to match current release. - -## v10.1.0 - -### New Features - -- Expose the polling URL for futures. - -### Bug Fixes - -- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated). - -## v10.0.0 - -### New Features - -- Added target and innererror fields to ServiceError to comply with OData v4 spec. -- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors). -- Added helper methods for obtaining authorizers. -- Expose the polling URL for futures. - -### Bug Fixes - -- Switched from glide to dep for dependency management. -- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec. -- Fixed a race condition in token refresh. - -### Breaking Changes - -- The ServiceError.Details field type has been changed to match the OData v4 spec. -- Go v1.7 has been dropped from CI. -- API parameter validation failures will now return a unique error type validation.Error. -- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race). - -## v9.10.0 - -- Fix the Service Bus suffix in Azure public env -- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control) - -## v9.9.0 - -### New Features - -- Added EventGridKeyAuthorizer for key authorization with event grid topics. - -### Bug Fixes - -- Fixed race condition when auto-refreshing service principal tokens. - -## v9.8.1 - -### Bug Fixes - -- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations. -- Updated runtime version info so it's current. - -## v9.8.0 - -### New Features - -- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed. - -## v9.7.1 - -### Bug Fixes - -- Use correct AAD and Graph endpoints for US Gov environment. - -## v9.7.0 - -### New Features - -- Added support for application/octet-stream MIME types. - -## v9.6.1 - -### Bug Fixes - -- Ensure Authorization header is added to request when polling for registration status. - -## v9.6.0 - -### New Features - -- Added support for acquiring tokens via MSI with a user assigned identity. - -## v9.5.3 - -### Bug Fixes - -- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters. -- Set correct Content Type when using autorest.WithFormData. - -## v9.5.2 - -### Bug Fixes - -- Check for nil \*http.Response before dereferencing it. - -## v9.5.1 - -### Bug Fixes - -- Don't count http.StatusTooManyRequests (429) against the retry cap. -- Use retry logic when SkipResourceProviderRegistration is set to true. - -## v9.5.0 - -### New Features - -- Added support for username + password, API key, authorization code and cognitive services authentication. -- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs. -- Added utility function AsStringSlice() to convert its parameters to a string slice.
- -### Bug Fixes - -- When checking for authentication failures look at the error type not the status code as it could vary. - -## v9.4.2 - -### Bug Fixes - -- Validate parameters when creating credentials. -- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed. - -## v9.4.1 - -### Bug Fixes - -- Update the AccessTokensPath() to read access tokens path through AZURE_ACCESS_TOKEN_FILE. If this - environment variable is not set, it will fall back to the default path set by Azure CLI. -- Use case-insensitive string comparison for polling states. - -## v9.4.0 - -### New Features - -- Added WaitForCompletion() to Future as a default polling implementation. - -### Bug Fixes - -- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes. - -## v9.3.1 - -### Bug Fixes - -- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error. - -## v9.3.0 - -### New Features - -- Added PollingMethod() to Future so callers know what kind of polling mechanism is used. -- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs). - -## v9.2.0 - -### New Features - -- Added support for custom Azure Stack endpoints. -- Added type azure.Future used to track the status of long-running operations. - -### Bug Fixes - -- Preserve the original error in DoRetryWithRegistration when registration fails. - -## v9.1.1 - -- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`. - -## v9.1.0 - -### New Features - -- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error. -- Support for loading Azure CLI Authentication files. -- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously. - -### Bug Fixes - -- RetriableRequest can now tolerate a ReadSeekable body being read but not reset. -- Adding missing Apache Headers - -## v9.0.0 - -> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes. - -Adding MSI Endpoint Support and CLI token rehydration. - -## v8.3.1 - -Pick up bug fix in adal for MSI support. - -## v8.3.0 - -Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience. - -## v8.2.0 - -### New Features - -- Add support for bearer authentication callbacks -- Support 429 response codes that include "Retry-After" header -- Support validation constraint "Pattern" for map keys - -### Bug Fixes - -- Make RetriableRequest work with multiple versions of Go - -## v8.1.1 - -Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8. - -## v8.1.0 - -Adds RetriableRequest type for more efficient handling of retrying HTTP requests. - -## v8.0.0 - -ADAL refactored into its own package. -Support for UNIX time. - -## v7.3.1 - -- Version Testing now removed from production bits that are shipped with the library. - -## v7.3.0 - -- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations - to acknowledge that they do not need either the entire or a trailing portion - of the response body. In doing so, Go's http library can reuse HTTP - connections more readily. -- Adding `PrepareDecorator` to target custom BaseURLs. -- Adding ACR suffix to public cloud environment.
- -- Updating Glide dependencies. - -## v7.2.5 - -- Fixed the Active Directory endpoint for the China cloud. -- Removes UTF-8 BOM if present in response payload. -- Added telemetry. - -## v7.2.3 - -- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay - duration. - -## v7.2.2 - -- autorest/azure: added ASM and ARM VM DNS suffixes. - -## v7.2.1 - -- Fixed parsing of UTC times that are not RFC3339 conformant. - -## v7.2.0 - -- autorest/validation: Reformat validation error for better error message. - -## v7.1.0 - -- preparer: Added support for multipart formdata - WithMultiPartFormdata() -- preparer: Added support for sending file in request body - WithFile -- client: Added RetryDuration parameter. -- autorest/validation: new package for validation code for Azure Go SDK. - -## v7.0.7 - -- Add trailing / to endpoint -- azure: add EnvironmentFromName - -## v7.0.6 - -- Add retry logic for 408, 500, 502, 503 and 504 status codes. -- Change url path and query encoding logic. -- Fix DelayForBackoff for proper exponential delay. -- Add CookieJar in Client. - -## v7.0.5 - -- Add check to start polling only when status is in [200,201,202]. -- Refactoring for unchecked errors. -- azure/persist changes. -- Fix 'file in use' issue in renewing token in deviceflow. -- Store header RetryAfter for subsequent requests in polling. -- Add attribute details in service error. - -## v7.0.4 - -- Better error messages for long running operation failures - -## v7.0.3 - -- Corrected DoPollForAsynchronous to properly handle the initial response - -## v7.0.2 - -- Corrected DoPollForAsynchronous to continue using the polling method first discovered - -## v7.0.1 - -- Fixed empty JSON input error in ByUnmarshallingJSON -- Fixed polling support for GET calls -- Changed format name from TimeRfc1123 to TimeRFC1123 - -## v7.0.0 - -- Added ByCopying responder with supporting TeeReadCloser -- Rewrote Azure asynchronous handling -- Reverted to only unmarshalling JSON -- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format - -The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since -`encoding/json` successfully deserializes all core types, and extended types normally provide -their custom JSON serialization handlers, the code has been reverted back to using -`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate -code; there is no loss of function, and there is a gain in accuracy, by reverting. - -Additionally, Azure services indicate requests to be polled by multiple means. The existing code -only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header). -The new code correctly covers all cases and aligns with the other Azure SDKs. - -## v6.1.0 - -- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values. - -## v6.0.0 - -- Completely reworked the handling of polled and asynchronous requests -- Removed unnecessary routines -- Reworked `mocks.Sender` to replay a series of `http.Response` objects -- Added `PrepareDecorators` for primitive types (e.g., bool, int32) - -Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead, new -`SendDecorators` implement different styles of polled behavior. See `autorest.DoPollForStatusCodes` -and `azure.DoPollForAsynchronous` for examples.
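The v7.0.0 rationale above is easy to demonstrate with the standard library alone: `json.Unmarshal` consumes the entire input and rejects trailing garbage, while a single `json.Decoder.Decode` call stops after the first value and reports no error. A self-contained illustration (plain Go, not go-autorest code):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	payload := []byte(`{"ok": true} trailing junk`)
	var v map[string]bool

	// Unmarshal parses the whole payload: the junk is an error.
	fmt.Println(json.Unmarshal(payload, &v)) // invalid character 't' after top-level value

	// Decode returns after the first JSON value and leaves the junk unread.
	dec := json.NewDecoder(bytes.NewReader(payload))
	fmt.Println(dec.Decode(&v)) // <nil>
}
```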
- -## v5.0.0 - -- Added new RespondDecorators unmarshalling primitive types -- Corrected application of inspection and authorization PrependDecorators - -## v4.0.0 - -- Added support for Azure long-running operations. -- Added cancelation support to all decorators and functions that may delay. -- Breaking: `DelayForBackoff` now accepts a channel, which may be nil. - -## v3.1.0 - -- Add support for OAuth Device Flow authorization. -- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material. -- Add helpers for persisting and restoring Tokens. -- Increased code coverage in the github.com/Azure/autorest/azure package - -## v3.0.0 - -- Breaking: `NewErrorWithError` no longer takes `statusCode int`. -- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`. -- Breaking: `Client#Send()` no longer takes `codes ...int` argument. -- Add: XML unmarshaling support with `ByUnmarshallingXML()` -- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide). - Applications using this library should either use Glide or vendor dependencies locally some other way. -- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors. -- Fix: use `net/http.DefaultClient` as base client. -- Fix: Missing inspection for polling responses added. -- Add: CopyAndDecode helpers. -- Improved `./autorest/to` with `[]string` helpers. -- Removed golint suppressions in .travis.yml. - -## v2.1.0 - -- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any) - -## v2.0.0 - -- Changed `to.StringMapPtr` method signature to return a pointer -- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificates and private keys - -## v1.0.0 - -- Added Logging inspectors to trace http.Request / Response -- Added support for User-Agent header -- Changed WithHeader PrepareDecorator to use set vs. add -- Added JSON to error when unmarshalling fails -- Added Client#Send method -- Corrected case of "Azure" in package paths -- Added "to" helpers, Azure helpers, and improved ease-of-use -- Corrected golint issues - -## v1.0.1 - -- Added CHANGELOG.md - -## v1.1.0 - -- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT -- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate - -## v1.1.1 - -- Introduce godeps and vendor dependencies introduced in v1.1.1 diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile deleted file mode 100644 index a434e73ac4..0000000000 --- a/vendor/github.com/Azure/go-autorest/GNUmakefile +++ /dev/null @@ -1,23 +0,0 @@ -DIR?=./autorest/ - -default: build - -build: fmt - go install $(DIR) - -test: - go test $(DIR) || exit 1 - -vet: - @echo "go vet ." - @go vet $(DIR)... ; if [ $$? -eq 1 ]; then \ - echo ""; \ - echo "Vet found suspicious constructs.
Please check the reported constructs"; \ - echo "and fix them if necessary before submitting the code for review."; \ - exit 1; \ - fi - -fmt: - gofmt -w $(DIR) - -.PHONY: build test vet fmt diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock deleted file mode 100644 index dc6e3e633e..0000000000 --- a/vendor/github.com/Azure/go-autorest/Gopkg.lock +++ /dev/null @@ -1,324 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e" - name = "contrib.go.opencensus.io/exporter/ocagent" - packages = ["."] - pruneopts = "UT" - revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" - version = "v0.6.0" - -[[projects]] - digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" - name = "github.com/census-instrumentation/opencensus-proto" - packages = [ - "gen-go/agent/common/v1", - "gen-go/agent/metrics/v1", - "gen-go/agent/trace/v1", - "gen-go/metrics/v1", - "gen-go/resource/v1", - "gen-go/trace/v1", - ] - pruneopts = "UT" - revision = "d89fa54de508111353cb0b06403c00569be780d8" - version = "v0.2.1" - -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - pruneopts = "UT" - revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" - version = "v3.2.0" - -[[projects]] - digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965" - name = "github.com/dimchansky/utfbom" - packages = ["."] - pruneopts = "UT" - revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" - name = "github.com/golang/groupcache" - packages = ["lru"] - pruneopts = "UT" - revision = "611e8accdfc92c4187d399e95ce826046d4c8d73" - -[[projects]] - digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa" - name = "github.com/golang/protobuf" - packages = [ - "descriptor", - "jsonpb", - "proto", - "protoc-gen-go/descriptor", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/struct", - "ptypes/timestamp", - "ptypes/wrappers", - ] - pruneopts = "UT" - revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" - version = "v1.3.2" - -[[projects]] - digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806" - name = "github.com/grpc-ecosystem/grpc-gateway" - packages = [ - "internal", - "runtime", - "utilities", - ] - pruneopts = "UT" - revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" - version = "v1.12.1" - -[[projects]] - digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" - name = "github.com/mitchellh/go-homedir" - packages = ["."] - pruneopts = "UT" - revision = "af06845cf3004701891bf4fdb884bfe4920b3727" - version = "v1.1.0" - -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "UT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - 
version = "v1.0.0" - -[[projects]] - digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require", - ] - pruneopts = "UT" - revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" - version = "v1.4.0" - -[[projects]] - digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71" - name = "go.opencensus.io" - packages = [ - ".", - "internal", - "internal/tagencoding", - "metric/metricdata", - "metric/metricproducer", - "plugin/ocgrpc", - "plugin/ochttp", - "plugin/ochttp/propagation/b3", - "plugin/ochttp/propagation/tracecontext", - "resource", - "stats", - "stats/internal", - "stats/view", - "tag", - "trace", - "trace/internal", - "trace/propagation", - "trace/tracestate", - ] - pruneopts = "UT" - revision = "aad2c527c5defcf89b5afab7f37274304195a6b2" - version = "v0.22.2" - -[[projects]] - branch = "master" - digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae" - name = "golang.org/x/crypto" - packages = [ - "pkcs12", - "pkcs12/internal/rc2", - ] - pruneopts = "UT" - revision = "e9b2fee46413994441b28dfca259d911d963dfed" - -[[projects]] - branch = "master" - digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43" - name = "golang.org/x/lint" - packages = [ - ".", - "golint", - ] - pruneopts = "UT" - revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448" - -[[projects]] - branch = "master" - digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910" - name = "golang.org/x/net" - packages = [ - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "trace", - ] - pruneopts = "UT" - revision = "1ddd1de85cb0337b623b740a609d35817d516a8d" - -[[projects]] - branch = "master" - digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" - name = "golang.org/x/sync" - packages = ["semaphore"] - pruneopts = "UT" - revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" - -[[projects]] - branch = "master" - digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "UT" - revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945" - -[[projects]] - digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/language", - "internal/language/compact", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" - version = "v0.3.2" - -[[projects]] - branch = "master" - digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7" - name = "golang.org/x/tools" - packages = [ - "go/ast/astutil", - "go/gcexportdata", - "go/internal/gcimporter", - "go/types/typeutil", - ] - pruneopts = "UT" - revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42" - -[[projects]] - digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877" - name = "google.golang.org/api" - packages = ["support/bundler"] - pruneopts = "UT" - revision = "8a410c21381766a810817fd6200fce8838ecb277" - version = "v0.14.0" - -[[projects]] - branch = "master" - digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd" - name = "google.golang.org/genproto" - 
packages = [ - "googleapis/api/httpbody", - "googleapis/rpc/status", - "protobuf/field_mask", - ] - pruneopts = "UT" - revision = "51378566eb590fa106d1025ea12835a4416dda84" - -[[projects]] - digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301" - name = "google.golang.org/grpc" - packages = [ - ".", - "backoff", - "balancer", - "balancer/base", - "balancer/roundrobin", - "binarylog/grpc_binarylog_v1", - "codes", - "connectivity", - "credentials", - "credentials/internal", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/balancerload", - "internal/binarylog", - "internal/buffer", - "internal/channelz", - "internal/envconfig", - "internal/grpcrand", - "internal/grpcsync", - "internal/resolver/dns", - "internal/resolver/passthrough", - "internal/syscall", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "serviceconfig", - "stats", - "status", - "tap", - ] - pruneopts = "UT" - revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514" - version = "v1.25.1" - -[[projects]] - digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "UT" - revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" - version = "v2.2.7" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "contrib.go.opencensus.io/exporter/ocagent", - "github.com/dgrijalva/jwt-go", - "github.com/dimchansky/utfbom", - "github.com/mitchellh/go-homedir", - "github.com/stretchr/testify/require", - "go.opencensus.io/plugin/ochttp", - "go.opencensus.io/plugin/ochttp/propagation/tracecontext", - "go.opencensus.io/stats/view", - "go.opencensus.io/trace", - "golang.org/x/crypto/pkcs12", - "golang.org/x/lint/golint", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml deleted file mode 100644 index 1fc2865969..0000000000 --- a/vendor/github.com/Azure/go-autorest/Gopkg.toml +++ /dev/null @@ -1,59 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - -required = ["golang.org/x/lint/golint"] - -[prune] - go-tests = true - unused-packages = true - -[[constraint]] - name = "contrib.go.opencensus.io/exporter/ocagent" - version = "0.6.0" - -[[constraint]] - name = "github.com/dgrijalva/jwt-go" - version = "3.2.0" - -[[constraint]] - name = "github.com/dimchansky/utfbom" - version = "1.1.0" - -[[constraint]] - name = "github.com/mitchellh/go-homedir" - version = "1.1.0" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "1.3.0" - -[[constraint]] - name = "go.opencensus.io" - version = "0.22.0" - -[[constraint]] - branch = "master" - name = "golang.org/x/crypto" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md deleted file mode 100644 index de1e19a44d..0000000000 --- a/vendor/github.com/Azure/go-autorest/README.md +++ /dev/null @@ -1,165 +0,0 @@ -# go-autorest - -[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) -[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master) -[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) - -Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages. - -An authentication client tested with Azure Active Directory (AAD) is also -provided in this repo in the package -`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package -is maintained only as part of the Azure Go SDK and is not related to other -"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD). - -## Overview - -Package go-autorest implements an HTTP request pipeline suitable for use across -multiple goroutines and provides the shared routines used by packages generated -by [Autorest](https://github.com/Azure/autorest.go). 
- -The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, -and Responding. A typical pattern is: - -```go - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) - - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) - - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) -``` - -Each phase relies on decorators to modify and / or manage processing. Decorators may first modify -and then pass the data along, pass the data first and then modify the result, or wrap themselves -around passing the data (such as a logger might do). Decorators run in the order provided. For -example, the following: - -```go - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) -``` - -will set the URL to: - -``` - https://microsoft.com/a/b/c -``` - -Preparers and Responders may be shared and re-used (assuming the underlying decorators support -sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders -shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, -all bound together by means of input / output channels. - -Decorators hold their passed state within a closure (such as the path components in the example -above). Be careful to share Preparers and Responders only in a context where such held state -applies. For example, it may not make sense to share a Preparer that applies a query string from a -fixed set of values. Similarly, sharing a Responder that reads the response body into a passed -struct (e.g., `ByUnmarshallingJSON`) is likely incorrect. - -Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. - -See the included examples for more detail. For details on the suggested use of this package by -generated clients, see the Client described below. - -## Helpers - -### Handling Swagger Dates - -The Swagger specification (https://swagger.io) that drives AutoRest -(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The -github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct -parsing and formatting. - -### Handling Empty Values - -In JSON, missing values have different semantics than empty values. This is especially true for -services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains -only those values to modify. Missing values are to be left unchanged. Developers, then, require a -means to both specify an empty value and to leave the value out of the submitted JSON. - -The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits -empty values from the rendered JSON. Since Go defines default values for all base types (such as "" -for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package -treats default values as meaning empty, omitting them from the rendered JSON. This means that, using -the Go base types encoded through the default JSON package, it is not possible to create JSON to -clear a value at the server. - -The workaround within the Go community is to use pointers to base types in lieu of base types within -structures that map to JSON. For example, instead of a value of type `string`, the workaround uses -`*string`.
While this enables distinguishing empty values from those to be unchanged, creating -pointers to a base type (notably constant, in-line values) requires additional variables. This, for -example, - -```go - s := struct { - S *string - }{ S: &"foo" } -``` -fails, while this - -```go - v := "foo" - s := struct { - S *string - }{ S: &v } -``` -succeeds. - -To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for -Go base types which have Swagger analogs. It also provides a helper that converts between -`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value -associated with a key should be cleared. With the helpers, the previous example becomes - -```go - s := struct { - S *string - }{ S: to.StringPtr("foo") } -``` - -## Install - -```bash -go get github.com/Azure/go-autorest/autorest -go get github.com/Azure/go-autorest/autorest/azure -go get github.com/Azure/go-autorest/autorest/date -go get github.com/Azure/go-autorest/autorest/to -``` - -### Using with Go Modules -In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules. - -- autorest/adal -- autorest/azure/auth -- autorest/azure/cli -- autorest/date -- autorest/mocks -- autorest/to -- autorest/validation -- autorest -- logger -- tracing - -Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules. - -## License - -See LICENSE file. - ------ - -This project has adopted the [Microsoft Open Source Code of -Conduct](https://opensource.microsoft.com/codeofconduct/). For more information -see the [Code of Conduct -FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact -[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional -questions or comments. diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files.
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md deleted file mode 100644 index b11eb07884..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ /dev/null @@ -1,294 +0,0 @@ -# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `adal` to `azidentity`, please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/). - -# Azure Active Directory authentication for Go - -This is a standalone package for authenticating with Azure Active -Directory from other Go libraries and applications, in particular the [Azure SDK -for Go](https://github.com/Azure/azure-sdk-for-go). - -Note: Despite the package's name, it is not related to other "ADAL" libraries -maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues -should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) -or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue -trackers.
- -## Install - -```bash -go get -u github.com/Azure/go-autorest/autorest/adal -``` - -## Usage - -An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). - -### Register an Azure AD Application with secret - - -1. Register a new application with a `secret` credential - - ``` - az ad app create \ - --display-name example-app \ - --homepage https://example-app/home \ - --identifier-uris https://example-app/app \ - --password secret - ``` - -2. Create a service principal using the `Application ID` from the previous step - - ``` - az ad sp create --id "Application ID" - ``` - - * Replace `Application ID` with `appId` from step 1. - -### Register an Azure AD Application with certificate - -1. Create a private key - - ``` - openssl genrsa -out "example-app.key" 2048 - ``` - -2. Create the certificate - - ``` - openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" - openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 - ``` - -3. Create the PKCS12 version of the certificate that also contains the private key - - ``` - openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: - - ``` - -4. Register a new application with the certificate content from `example-app.crt` - - ``` - certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" - - az ad app create \ - --display-name example-app \ - --homepage https://example-app/home \ - --identifier-uris https://example-app/app \ - --key-usage Verify --end-date 2018-01-01 \ - --key-value "${certificateContents}" - ``` - -5. Create a service principal using the `Application ID` from the previous step - - ``` - az ad sp create --id "APPLICATION_ID" - ``` - - * Replace `APPLICATION_ID` with `appId` from step 4. - - -### Grant the necessary permissions - -Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained -level. There is a set of [pre-defined roles](https://docs.microsoft.com/azure/active-directory/role-based-access-built-in-roles) -which can be assigned to a service principal of an Azure AD application depending on your needs. - -``` -az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" -``` - -* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step. -* Replace the `ROLE_NAME` with a role name of your choice. - -It is also possible to define custom role definitions. - -``` -az role definition create --role-definition role-definition.json -``` - -* Check [custom roles](https://docs.microsoft.com/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file; a minimal example is sketched below.
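For illustration only, a minimal sketch of what a custom `role-definition.json` might contain; the role name, action, and subscription ID are placeholders, and the linked documentation is the authoritative reference for the schema:

```
{
  "Name": "Example App Reader",
  "IsCustom": true,
  "Description": "Read-only access for example-app.",
  "Actions": [
    "Microsoft.Resources/subscriptions/resourceGroups/read"
  ],
  "NotActions": [],
  "AssignableScopes": [
    "/subscriptions/SUBSCRIPTION_ID"
  ]
}
```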
- - -### Acquire Access Token - -The common configuration used by all flows: - -```Go -const activeDirectoryEndpoint = "https://login.microsoftonline.com/" -tenantID := "TENANT_ID" -oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) - -applicationID := "APPLICATION_ID" - -callback := func(token adal.Token) error { - // This is called after the token is acquired - return nil -} - -// The resource for which the token is acquired -resource := "https://management.core.windows.net/" -``` - -* Replace the `TENANT_ID` with your tenant ID. -* Replace the `APPLICATION_ID` with the value from the previous section. - -#### Client Credentials - -```Go -applicationSecret := "APPLICATION_SECRET" - -spt, err := adal.NewServicePrincipalToken( - *oauthConfig, - applicationID, - applicationSecret, - resource, - callbacks...) -if err != nil { - return nil, err -} - -// Acquire a new access token -err = spt.Refresh() -if err == nil { - token := spt.Token -} -``` - -* Replace the `APPLICATION_SECRET` with the `password` value from the previous section. - -#### Client Certificate - -```Go -certificatePath := "./example-app.pfx" - -certData, err := ioutil.ReadFile(certificatePath) -if err != nil { - return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) -} - -// Get the certificate and private key from pfx file -certificate, rsaPrivateKey, err := decodePkcs12(certData, "") -if err != nil { - return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) -} - -spt, err := adal.NewServicePrincipalTokenFromCertificate( - *oauthConfig, - applicationID, - certificate, - rsaPrivateKey, - resource, - callbacks...) - -// Acquire a new access token -err = spt.Refresh() -if err == nil { - token := spt.Token -} -``` - -* Update the certificate path to point to the example-app.pfx file which was created in the previous section. - - -#### Device Code - -```Go -oauthClient := &http.Client{} - -// Acquire the device code -deviceCode, err := adal.InitiateDeviceAuth( - oauthClient, - *oauthConfig, - applicationID, - resource) -if err != nil { - return nil, fmt.Errorf("Failed to start device auth flow: %s", err) -} - -// Display the authentication message -fmt.Println(*deviceCode.Message) - -// Wait here until the user is authenticated -token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) -if err != nil { - return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) -} - -spt, err := adal.NewServicePrincipalTokenFromManualToken( - *oauthConfig, - applicationID, - resource, - *token, - callbacks...) - -if err == nil { - token := spt.Token -} -``` - -#### Username and password authentication - -```Go -spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( - *oauthConfig, - applicationID, - username, - password, - resource, - callbacks...) - -if err == nil { - token := spt.Token -} -``` - -#### Authorization code authentication - -```Go -spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( - *oauthConfig, - applicationID, - clientSecret, - authorizationCode, - redirectURI, - resource, - callbacks...) - -err = spt.Refresh() -if err == nil { - token := spt.Token -} -``` - -### Command Line Tool - -A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
- -``` -adal -h - -Usage of ./adal: - -applicationId string - application id - -certificatePath string - path to pk12/PFX application certificate - -mode string - authentication mode (device, secret, cert, refresh) (default "device") - -resource string - resource for which the token is requested - -secret string - application secret - -tenantId string - tenant id - -tokenCachePath string - location of oauth token cache (default "/home/cgc/.adal/accessToken.json") -``` - -Example: acquire a token for `https://management.core.windows.net/` using the device code flow: - -``` -adal -mode device \ - -applicationId "APPLICATION_ID" \ - -tenantId "TENANT_ID" \ - -resource https://management.core.windows.net/ - -``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go deleted file mode 100644 index fa5964742f..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ /dev/null @@ -1,151 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "net/url" -) - -const ( - activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" ) - -// OAuthConfig represents the endpoints needed -// in OAuth operations -type OAuthConfig struct { - AuthorityEndpoint url.URL `json:"authorityEndpoint"` - AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` - TokenEndpoint url.URL `json:"tokenEndpoint"` - DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` -} - -// IsZero returns true if the OAuthConfig object is zero-initialized. -func (oac OAuthConfig) IsZero() bool { - return oac == OAuthConfig{} -} - -func validateStringParam(param, name string) error { - if len(param) == 0 { - return fmt.Errorf("parameter '%s' cannot be empty", name) - } - return nil -} - -// NewOAuthConfig returns an OAuthConfig with tenant-specific URLs -func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - apiVer := "1.0" - return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) -} - -// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant-specific URLs. -// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
-func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { - if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { - return nil, err - } - api := "" - // it's legal for tenantID to be empty so don't validate it - if apiVersion != nil { - if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { - return nil, err - } - api = fmt.Sprintf("?api-version=%s", *apiVersion) - } - u, err := url.Parse(activeDirectoryEndpoint) - if err != nil { - return nil, err - } - authorityURL, err := u.Parse(tenantID) - if err != nil { - return nil, err - } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) - if err != nil { - return nil, err - } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) - if err != nil { - return nil, err - } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) - if err != nil { - return nil, err - } - - return &OAuthConfig{ - AuthorityEndpoint: *authorityURL, - AuthorizeEndpoint: *authorizeURL, - TokenEndpoint: *tokenURL, - DeviceCodeEndpoint: *deviceCodeURL, - }, nil -} - -// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs. -type MultiTenantOAuthConfig interface { - PrimaryTenant() *OAuthConfig - AuxiliaryTenants() []*OAuthConfig -} - -// OAuthOptions contains optional OAuthConfig creation arguments. -type OAuthOptions struct { - APIVersion string -} - -// apiVersion returns the bare version string; callers wrap it in the -// "?api-version=" query parameter via NewOAuthConfigWithAPIVersion. -func (c OAuthOptions) apiVersion() string { - if c.APIVersion != "" { - return c.APIVersion - } - return "1.0" -} - -// NewMultiTenantOAuthConfig creates an object that supports multi-tenant OAuth configuration. -// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
-func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { - if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { - return nil, errors.New("must specify one to three auxiliary tenants") - } - mtCfg := multiTenantOAuthConfig{ - cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), - } - apiVer := options.apiVersion() - pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) - if err != nil { - return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) - } - mtCfg.cfgs[0] = pri - for i := range auxiliaryTenantIDs { - aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) - if err != nil { - return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) - } - mtCfg.cfgs[i+1] = aux - } - return mtCfg, nil -} - -type multiTenantOAuthConfig struct { - // first config in the slice is the primary tenant - cfgs []*OAuthConfig -} - -func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { - return m.cfgs[0] -} - -func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { - return m.cfgs[1:] -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go deleted file mode 100644 index 9daa4b58b8..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ /dev/null @@ -1,273 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/* - This file is largely based on rjw57/oauth2device's code, with the following differences: - * scope -> resource, and only allow a single one - * receive "Message" in the DeviceCode struct and show it to users as the prompt - * azure-xplat-cli has the following behavior that this emulates: - - does not send client_secret during the token exchange - - sends resource again in the token exchange request -*/ - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" -) - -const ( - logPrefix = "autorest/adal/devicetoken:" -) - -var ( - // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow - ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) - - // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow - ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) - - // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow - ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) - - // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow - ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) - - // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow - ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) - - // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow - ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) - - // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow - ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) - - errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" - errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" - errTokenSendingFails = "Error occurred while sending request with device code for a token" - errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" - errStatusNotOK = "Error HTTP status != 200" -) - -// DeviceCode is the object returned by the device auth endpoint. -// It contains information to instruct the user to complete the auth flow -type DeviceCode struct { - DeviceCode *string `json:"device_code,omitempty"` - UserCode *string `json:"user_code,omitempty"` - VerificationURL *string `json:"verification_url,omitempty"` - ExpiresIn *int64 `json:"expires_in,string,omitempty"` - Interval *int64 `json:"interval,string,omitempty"` - - Message *string `json:"message"` // Azure specific - Resource string // the following fields are stored when initiating the flow and used when exchanging the code - OAuthConfig OAuthConfig - ClientID string -} - -// TokenError is the object returned by the token exchange endpoint -// when something is amiss -type TokenError struct { - Error *string `json:"error,omitempty"` - ErrorCodes []int `json:"error_codes,omitempty"` - ErrorDescription *string `json:"error_description,omitempty"` - Timestamp *string `json:"timestamp,omitempty"` - TraceID *string `json:"trace_id,omitempty"` -} - -// DeviceToken is the object returned by the token exchange
endpoint -// It can either look like a Token or an ErrorToken, so put both here -// and check for presence of "Error" to know if we are in error state -type deviceToken struct { - Token - TokenError -} - -// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode -// that can be used with CheckForUserCompletion or WaitForUserCompletion. -// Deprecated: use InitiateDeviceAuthWithContext() instead. -func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource) -} - -// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode -// that can be used with CheckForUserCompletion or WaitForUserCompletion. -func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - v := url.Values{ - "client_id": []string{clientID}, - "resource": []string{resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req.WithContext(ctx)) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) - } - - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrDeviceCodeEmpty - } - - var code DeviceCode - err = json.Unmarshal(rb, &code) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - code.ClientID = clientID - code.Resource = resource - code.OAuthConfig = oauthConfig - - return &code, nil -} - -// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint -// to see if the device flow has: been completed, timed out, or otherwise failed -// Deprecated: use CheckForUserCompletionWithContext() instead. 
-func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - return CheckForUserCompletionWithContext(context.Background(), sender, code) -} - -// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint -// to see if the device flow has: been completed, timed out, or otherwise failed -func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { - v := url.Values{ - "client_id": []string{code.ClientID}, - "code": []string{*code.DeviceCode}, - "grant_type": []string{OAuthGrantTypeDeviceCode}, - "resource": []string{code.Resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req.WithContext(ctx)) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrOAuthTokenEmpty - } - - var token deviceToken - err = json.Unmarshal(rb, &token) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if token.Error == nil { - return &token.Token, nil - } - - switch *token.Error { - case "authorization_pending": - return nil, ErrDeviceAuthorizationPending - case "slow_down": - return nil, ErrDeviceSlowDown - case "access_denied": - return nil, ErrDeviceAccessDenied - case "code_expired": - return nil, ErrDeviceCodeExpired - default: - // return a more meaningful error message if available - if token.ErrorDescription != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription) - } - return nil, ErrDeviceGeneric - } -} - -// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. -// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. -// Deprecated: use WaitForUserCompletionWithContext() instead. -func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - return WaitForUserCompletionWithContext(context.Background(), sender, code) -} - -// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error -// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. 
-func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { - intervalDuration := time.Duration(*code.Interval) * time.Second - waitDuration := intervalDuration - - for { - token, err := CheckForUserCompletionWithContext(ctx, sender, code) - - if err == nil { - return token, nil - } - - switch err { - case ErrDeviceSlowDown: - waitDuration += waitDuration - case ErrDeviceAuthorizationPending: - // noop - default: // everything else is "fatal" to us - return nil, err - } - - if waitDuration > (intervalDuration * 3) { - return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) - } - - select { - case <-time.After(waitDuration): - // noop - case <-ctx.Done(): - return nil, ctx.Err() - } - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go deleted file mode 100644 index 647a61bb8c..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build modhack -// +build modhack - -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go deleted file mode 100644 index 2a974a39b3..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go +++ /dev/null @@ -1,135 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "golang.org/x/crypto/pkcs12" -) - -var ( - // ErrMissingCertificate is returned when no local certificate is found in the provided PFX data. - ErrMissingCertificate = errors.New("adal: certificate missing") - - // ErrMissingPrivateKey is returned when no private key is found in the provided PFX data. 
- ErrMissingPrivateKey = errors.New("adal: private key missing") -) - -// LoadToken restores a Token object from a file located at 'path'. -func LoadToken(path string) (*Token, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) - } - defer file.Close() - - var token Token - - dec := json.NewDecoder(file) - if err = dec.Decode(&token); err != nil { - return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) - } - return &token, nil -} - -// SaveToken persists an OAuth token at the given location on disk. -// It moves the new file into place so it can safely be used to replace an existing file -// that may be accessed by multiple processes. -func SaveToken(path string, mode os.FileMode, token Token) error { - dir := filepath.Dir(path) - err := os.MkdirAll(dir, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) - } - - newFile, err := ioutil.TempFile(dir, "token") - if err != nil { - return fmt.Errorf("failed to create the temp file to write the token: %v", err) - } - tempPath := newFile.Name() - - if err := json.NewEncoder(newFile).Encode(token); err != nil { - return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) - } - if err := newFile.Close(); err != nil { - return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) - } - - // Atomic replace to avoid multi-writer file corruptions - if err := os.Rename(tempPath, path); err != nil { - return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) - } - if err := os.Chmod(path, mode); err != nil { - return fmt.Errorf("failed to chmod the token file %s: %v", path, err) - } - return nil -} - -// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data. -// The PFX data must contain a private key along with a certificate whose public key matches that of the -// private key or an error is returned. -// If the private key is not password protected, pass the empty string for password.
-func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { - blocks, err := pkcs12.ToPEM(pfxData, password) - if err != nil { - return nil, nil, err - } - // first extract the private key - var priv *rsa.PrivateKey - for _, block := range blocks { - if block.Type == "PRIVATE KEY" { - priv, err = x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, nil, err - } - break - } - } - if priv == nil { - return nil, nil, ErrMissingPrivateKey - } - // now find the certificate with the matching public key of our private key - var cert *x509.Certificate - for _, block := range blocks { - if block.Type == "CERTIFICATE" { - pcert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, nil, err - } - certKey, ok := pcert.PublicKey.(*rsa.PublicKey) - if !ok { - // keep looking - continue - } - if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 { - // found a match - cert = pcert - break - } - } - } - if cert == nil { - return nil, nil, ErrMissingCertificate - } - return cert, priv, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go deleted file mode 100644 index eb649bce9f..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ /dev/null @@ -1,101 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "crypto/tls" - "net" - "net/http" - "net/http/cookiejar" - "sync" - "time" - - "github.com/Azure/go-autorest/tracing" -) - -const ( - contentType = "Content-Type" - mimeTypeFormPost = "application/x-www-form-urlencoded" -) - -// DO NOT ACCESS THIS DIRECTLY. Go through sender(). -var defaultSender Sender -var defaultSenderInit = &sync.Once{} - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(sender(), decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to -// the Sender.
Decorators are applied in the order received, but their effect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). -func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -func sender() Sender { - // note that we can't init defaultSender in init() since it will - // execute before calling code has had a chance to enable tracing - defaultSenderInit.Do(func() { - // copied from http.DefaultTransport with a TLS minimum version. - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - }, - } - var roundTripper http.RoundTripper = transport - if tracing.IsEnabled() { - roundTripper = tracing.NewTransport(transport) - } - j, _ := cookiejar.New(nil) - defaultSender = &http.Client{Jar: j, Transport: roundTripper} - }) - return defaultSender -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go deleted file mode 100644 index 2a24ab80cf..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ /dev/null @@ -1,1430 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
- -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "math" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/logger" - "github.com/golang-jwt/jwt/v4" -) - -const ( - defaultRefresh = 5 * time.Minute - - // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow - OAuthGrantTypeDeviceCode = "device_code" - - // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows - OAuthGrantTypeClientCredentials = "client_credentials" - - // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows - OAuthGrantTypeUserPass = "password" - - // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows - OAuthGrantTypeRefreshToken = "refresh_token" - - // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows - OAuthGrantTypeAuthorizationCode = "authorization_code" - - // metadataHeader is the header required by MSI extension - metadataHeader = "Metadata" - - // msiEndpoint is the well-known endpoint for getting MSI authentication tokens - msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" - - // the API version to use for the MSI endpoint - msiAPIVersion = "2018-02-01" - - // the default number of attempts to refresh an MSI authentication token - defaultMaxMSIRefreshAttempts = 5 - - // msiEndpointEnv is the environment variable used to store the endpoint on App Service and Functions - msiEndpointEnv = "MSI_ENDPOINT" - - // msiSecretEnv is the environment variable used to store the request secret on App Service and Functions - msiSecretEnv = "MSI_SECRET" - - // the API version to use for the legacy App Service MSI endpoint - appServiceAPIVersion2017 = "2017-09-01" - - // secret header used when authenticating against app service MSI endpoint - secretHeader = "Secret" - - // the format for expires_on in UTC with AM/PM - expiresOnDateFormatPM = "1/2/2006 15:04:05 PM +00:00" - - // the format for expires_on in UTC without AM/PM - expiresOnDateFormat = "1/2/2006 15:04:05 +00:00" -) - -// OAuthTokenProvider is an interface which should be implemented by an access token retriever -type OAuthTokenProvider interface { - OAuthToken() string -} - -// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization. -type MultitenantOAuthTokenProvider interface { - PrimaryOAuthToken() string - AuxiliaryOAuthTokens() []string -} - -// TokenRefreshError is an interface used by errors returned during token refresh.
-type TokenRefreshError interface { - error - Response() *http.Response -} - -// Refresher is an interface for token refresh functionality -type Refresher interface { - Refresh() error - RefreshExchange(resource string) error - EnsureFresh() error -} - -// RefresherWithContext is an interface for token refresh functionality -type RefresherWithContext interface { - RefreshWithContext(ctx context.Context) error - RefreshExchangeWithContext(ctx context.Context, resource string) error - EnsureFreshWithContext(ctx context.Context) error -} - -// TokenRefreshCallback is the type representing callbacks that will be called after -// a successful token refresh -type TokenRefreshCallback func(Token) error - -// TokenRefresh is a type representing a custom callback to refresh a token -type TokenRefresh func(ctx context.Context, resource string) (*Token, error) - -// JWTCallback is the type representing callback that will be called to get the federated OIDC JWT -type JWTCallback func() (string, error) - -// Token encapsulates the access token used to authorize Azure requests. -// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response -type Token struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - - ExpiresIn json.Number `json:"expires_in"` - ExpiresOn json.Number `json:"expires_on"` - NotBefore json.Number `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` -} - -func newToken() Token { - return Token{ - ExpiresIn: "0", - ExpiresOn: "0", - NotBefore: "0", - } -} - -// IsZero returns true if the token object is zero-initialized. -func (t Token) IsZero() bool { - return t == Token{} -} - -// Expires returns the time.Time when the Token expires. -func (t Token) Expires() time.Time { - s, err := t.ExpiresOn.Float64() - if err != nil { - s = -3600 - } - - expiration := date.NewUnixTimeFromSeconds(s) - - return time.Time(expiration).UTC() -} - -// IsExpired returns true if the Token is expired, false otherwise. -func (t Token) IsExpired() bool { - return t.WillExpireIn(0) -} - -// WillExpireIn returns true if the Token will expire after the passed time.Duration interval -// from now, false otherwise. -func (t Token) WillExpireIn(d time.Duration) bool { - return !t.Expires().After(time.Now().Add(d)) -} - -// OAuthToken returns the current access token -func (t *Token) OAuthToken() string { - return t.AccessToken -} - -// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form -// that is submitted when acquiring an oAuth token. -type ServicePrincipalSecret interface { - SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error -} - -// ServicePrincipalNoSecret represents a secret type that contains no secret, -// meaning it is not valid for fetching a fresh token. It is used by manually created tokens. -type ServicePrincipalNoSecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It only returns an error for the ServicePrincipalNoSecret type -func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") -} - -// MarshalJSON implements the json.Marshaler interface.
-func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalNoSecret", - }) -} - -// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. -type ServicePrincipalTokenSecret struct { - ClientSecret string `json:"value"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using the client_secret. -func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("client_secret", tokenSecret.ClientSecret) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalTokenSecret", - Value: tokenSecret.ClientSecret, - }) -} - -// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. -type ServicePrincipalCertificateSecret struct { - Certificate *x509.Certificate - PrivateKey *rsa.PrivateKey -} - -// SignJwt returns the JWT signed with the certificate's private key. -func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { - hasher := sha1.New() - _, err := hasher.Write(secret.Certificate.Raw) - if err != nil { - return "", err - } - - thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) - - // The jti (JWT ID) claim provides a unique identifier for the JWT. - jti := make([]byte, 20) - _, err = rand.Read(jti) - if err != nil { - return "", err - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Header["x5t"] = thumbprint - x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} - token.Header["x5c"] = x5c - token.Claims = jwt.MapClaims{ - "aud": spt.inner.OauthConfig.TokenEndpoint.String(), - "iss": spt.inner.ClientID, - "sub": spt.inner.ClientID, - "jti": base64.URLEncoding.EncodeToString(jti), - "nbf": time.Now().Unix(), - "exp": time.Now().Add(24 * time.Hour).Unix(), - } - - signedString, err := token.SignedString(secret.PrivateKey) - return signedString, err -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. -func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - jwt, err := secret.SignJwt(spt) - if err != nil { - return err - } - - v.Set("client_assertion", jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") -} - -// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. -type ServicePrincipalMSISecret struct { - msiType msiType - clientResourceID string -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
-func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") -} - -// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. -type ServicePrincipalUsernamePasswordSecret struct { - Username string `json:"username"` - Password string `json:"password"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("username", secret.Username) - v.Set("password", secret.Password) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Username string `json:"username"` - Password string `json:"password"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalUsernamePasswordSecret", - Username: secret.Username, - Password: secret.Password, - }) -} - -// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. -type ServicePrincipalAuthorizationCodeSecret struct { - ClientSecret string `json:"value"` - AuthorizationCode string `json:"authCode"` - RedirectURI string `json:"redirect"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("code", secret.AuthorizationCode) - v.Set("client_secret", secret.ClientSecret) - v.Set("redirect_uri", secret.RedirectURI) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - AuthCode string `json:"authCode"` - Redirect string `json:"redirect"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalAuthorizationCodeSecret", - Value: secret.ClientSecret, - AuthCode: secret.AuthorizationCode, - Redirect: secret.RedirectURI, - }) -} - -// ServicePrincipalFederatedSecret implements ServicePrincipalSecret for Federated JWTs. -type ServicePrincipalFederatedSecret struct { - jwtCallback JWTCallback -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during OAuth Token Acquisition using a JWT signed by an OIDC issuer. -func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(_ *ServicePrincipalToken, v *url.Values) error { - jwt, err := secret.jwtCallback() - if err != nil { - return err - } - - v.Set("client_assertion", jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalFederatedSecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalFederatedSecret is not supported") -} - -// ServicePrincipalToken encapsulates a Token created for a Service Principal. 
-type ServicePrincipalToken struct { - inner servicePrincipalToken - refreshLock *sync.RWMutex - sender Sender - customRefreshFunc TokenRefresh - refreshCallbacks []TokenRefreshCallback - // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. - // Setting this to a value less than 1 will use the default value. - MaxMSIRefreshAttempts int -} - -// MarshalTokenJSON returns the marshalled inner token. -func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { - return json.Marshal(spt.inner.Token) -} - -// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. -func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { - spt.refreshCallbacks = callbacks -} - -// SetCustomRefreshFunc sets a custom refresh function used to refresh the token. -func (spt *ServicePrincipalToken) SetCustomRefreshFunc(customRefreshFunc TokenRefresh) { - spt.customRefreshFunc = customRefreshFunc -} - -// MarshalJSON implements the json.Marshaler interface. -func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { - return json.Marshal(spt.inner) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { - // need to determine the token type - raw := map[string]interface{}{} - err := json.Unmarshal(data, &raw) - if err != nil { - return err - } - secret := raw["secret"].(map[string]interface{}) - switch secret["type"] { - case "ServicePrincipalNoSecret": - spt.inner.Secret = &ServicePrincipalNoSecret{} - case "ServicePrincipalTokenSecret": - spt.inner.Secret = &ServicePrincipalTokenSecret{} - case "ServicePrincipalCertificateSecret": - return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported") - case "ServicePrincipalMSISecret": - return errors.New("unmarshalling ServicePrincipalMSISecret is not supported") - case "ServicePrincipalUsernamePasswordSecret": - spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} - case "ServicePrincipalAuthorizationCodeSecret": - spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} - case "ServicePrincipalFederatedSecret": - return errors.New("unmarshalling ServicePrincipalFederatedSecret is not supported") - default: - return fmt.Errorf("unrecognized token type '%s'", secret["type"]) - } - err = json.Unmarshal(data, &spt.inner) - if err != nil { - return err - } - // Don't override the refreshLock or the sender if those have been already set. - if spt.refreshLock == nil { - spt.refreshLock = &sync.RWMutex{} - } - if spt.sender == nil { - spt.sender = sender() - } - return nil -} - -// internal type used for marshalling/unmarshalling -type servicePrincipalToken struct { - Token Token `json:"token"` - Secret ServicePrincipalSecret `json:"secret"` - OauthConfig OAuthConfig `json:"oauth"` - ClientID string `json:"clientID"` - Resource string `json:"resource"` - AutoRefresh bool `json:"autoRefresh"` - RefreshWithin time.Duration `json:"refreshWithin"` -} - -func validateOAuthConfig(oac OAuthConfig) error { - if oac.IsZero() { - return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized") - } - return nil -} - -// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
-func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(id, "id"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - Token: newToken(), - OauthConfig: oauthConfig, - Secret: secret, - ClientID: id, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - }, - refreshLock: &sync.RWMutex{}, - sender: sender(), - refreshCallbacks: callbacks, - } - return spt, nil -} - -// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token -func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalNoSecret{}, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret -func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - secret, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal -// credentials scoped to the named resource. 
-func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(secret, "secret"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalTokenSecret{ - ClientSecret: secret, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied certificate and RSA private key. -func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if certificate == nil { - return nil, fmt.Errorf("parameter 'certificate' cannot be nil") - } - if privateKey == nil { - return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
-func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(username, "username"); err != nil { - return nil, err - } - if err := validateStringParam(password, "password"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalUsernamePasswordSecret{ - Username: username, - Password: password, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code. -func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(clientSecret, "clientSecret"); err != nil { - return nil, err - } - if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil { - return nil, err - } - if err := validateStringParam(redirectURI, "redirectURI"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalAuthorizationCodeSecret{ - ClientSecret: clientSecret, - AuthorizationCode: authorizationCode, - RedirectURI: redirectURI, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromFederatedToken creates a ServicePrincipalToken from the supplied federated OIDC JWT. -// -// Deprecated: Use NewServicePrincipalTokenFromFederatedTokenCallback to refresh the JWT dynamically. -func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientID string, jwt string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if jwt == "" { - return nil, fmt.Errorf("parameter 'jwt' cannot be empty") - } - return NewServicePrincipalTokenFromFederatedTokenCallback( - oauthConfig, - clientID, - func() (string, error) { - return jwt, nil - }, - resource, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromFederatedTokenCallback creates a ServicePrincipalToken from the supplied federated OIDC JWTCallback.
-func NewServicePrincipalTokenFromFederatedTokenCallback(oauthConfig OAuthConfig, clientID string, jwtCallback JWTCallback, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if jwtCallback == nil { - return nil, fmt.Errorf("parameter 'jwtCallback' cannot be nil") - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalFederatedSecret{ - jwtCallback: jwtCallback, - }, - callbacks..., - ) -} - -type msiType int - -const ( - msiTypeUnavailable msiType = iota - msiTypeAppServiceV20170901 - msiTypeCloudShell - msiTypeIMDS -) - -func (m msiType) String() string { - switch m { - case msiTypeAppServiceV20170901: - return "AppServiceV20170901" - case msiTypeCloudShell: - return "CloudShell" - case msiTypeIMDS: - return "IMDS" - default: - return fmt.Sprintf("unhandled MSI type %d", m) - } -} - -// returns the MSI type and endpoint, or an error -func getMSIType() (msiType, string, error) { - if endpointEnvVar := os.Getenv(msiEndpointEnv); endpointEnvVar != "" { - // if the env var MSI_ENDPOINT is set - if secretEnvVar := os.Getenv(msiSecretEnv); secretEnvVar != "" { - // if BOTH the env vars MSI_ENDPOINT and MSI_SECRET are set the msiType is AppService - return msiTypeAppServiceV20170901, endpointEnvVar, nil - } - // if ONLY the env var MSI_ENDPOINT is set the msiType is CloudShell - return msiTypeCloudShell, endpointEnvVar, nil - } - // if MSI_ENDPOINT is NOT set assume the msiType is IMDS - return msiTypeIMDS, msiEndpoint, nil -} - -// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. -// NOTE: this always returns the IMDS endpoint, it does not work for app services or cloud shell. -// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint. -func GetMSIVMEndpoint() (string, error) { - return msiEndpoint, nil -} - -// GetMSIAppServiceEndpoint gets the MSI endpoint for App Service and Functions. -// It will return an error when not running in an app service/functions environment. -// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint. -func GetMSIAppServiceEndpoint() (string, error) { - msiType, endpoint, err := getMSIType() - if err != nil { - return "", err - } - switch msiType { - case msiTypeAppServiceV20170901: - return endpoint, nil - default: - return "", fmt.Errorf("%s is not an App Service environment", msiType) - } -} - -// GetMSIEndpoint gets the appropriate MSI endpoint depending on the runtime environment -// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint. -func GetMSIEndpoint() (string, error) { - _, endpoint, err := getMSIType() - return endpoint, err -} - -// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the system assigned identity when creating the token. -// msiEndpoint - empty string, or pass a non-empty string to override the default value. -// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
-func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", "", callbacks...) -} - -// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the client ID of the specified user-assigned identity when creating the token. -// msiEndpoint - empty string, or pass a non-empty string to override the default value. -// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead. -func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateStringParam(userAssignedID, "userAssignedID"); err != nil { - return nil, err - } - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, "", callbacks...) -} - -// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the Azure resource ID of the specified user-assigned identity when creating the token. -// msiEndpoint - empty string, or pass a non-empty string to override the default value. -// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead. -func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateStringParam(identityResourceID, "identityResourceID"); err != nil { - return nil, err - } - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", identityResourceID, callbacks...) -} - -// ManagedIdentityOptions contains optional values for configuring managed identity authentication. -type ManagedIdentityOptions struct { - // ClientID is the user-assigned identity to use during authentication. - // It is mutually exclusive with IdentityResourceID. - ClientID string - - // IdentityResourceID is the resource ID of the user-assigned identity to use during authentication. - // It is mutually exclusive with ClientID. - IdentityResourceID string -} - -// NewServicePrincipalTokenFromManagedIdentity creates a ServicePrincipalToken using a managed identity. -// It supports the following managed identity environments: -// - App Service Environment (API version 2017-09-01 only) -// - Cloud shell -// - IMDS with a system or user assigned identity -func NewServicePrincipalTokenFromManagedIdentity(resource string, options *ManagedIdentityOptions, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if options == nil { - options = &ManagedIdentityOptions{} - } - return newServicePrincipalTokenFromMSI("", resource, options.ClientID, options.IdentityResourceID, callbacks...)
-} - -func newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if userAssignedID != "" && identityResourceID != "" { - return nil, errors.New("cannot specify userAssignedID and identityResourceID") - } - msiType, endpoint, err := getMSIType() - if err != nil { - logger.Instance.Writef(logger.LogError, "Error determining managed identity environment: %v\n", err) - return nil, err - } - logger.Instance.Writef(logger.LogInfo, "Managed identity environment is %s, endpoint is %s\n", msiType, endpoint) - if msiEndpoint != "" { - endpoint = msiEndpoint - logger.Instance.Writef(logger.LogInfo, "Managed identity custom endpoint is %s\n", endpoint) - } - msiEndpointURL, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - // cloud shell sends its data in the request body - if msiType != msiTypeCloudShell { - v := url.Values{} - v.Set("resource", resource) - clientIDParam := "client_id" - switch msiType { - case msiTypeAppServiceV20170901: - clientIDParam = "clientid" - v.Set("api-version", appServiceAPIVersion2017) - break - case msiTypeIMDS: - v.Set("api-version", msiAPIVersion) - } - if userAssignedID != "" { - v.Set(clientIDParam, userAssignedID) - } else if identityResourceID != "" { - v.Set("mi_res_id", identityResourceID) - } - msiEndpointURL.RawQuery = v.Encode() - } - - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - Token: newToken(), - OauthConfig: OAuthConfig{ - TokenEndpoint: *msiEndpointURL, - }, - Secret: &ServicePrincipalMSISecret{ - msiType: msiType, - clientResourceID: identityResourceID, - }, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - ClientID: userAssignedID, - }, - refreshLock: &sync.RWMutex{}, - sender: sender(), - refreshCallbacks: callbacks, - MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, - } - - return spt, nil -} - -// internal type that implements TokenRefreshError -type tokenRefreshError struct { - message string - resp *http.Response -} - -// Error implements the error interface which is part of the TokenRefreshError interface. -func (tre tokenRefreshError) Error() string { - return tre.message -} - -// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. -func (tre tokenRefreshError) Response() *http.Response { - return tre.resp -} - -func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { - return tokenRefreshError{message: message, resp: resp} -} - -// EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (spt *ServicePrincipalToken) EnsureFresh() error { - return spt.EnsureFreshWithContext(context.Background()) -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. 
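The `EnsureFreshWithContext` implementation that follows relies on double-checked locking: check expiry under a read lock, take the write lock, then check again before refreshing. A distilled, self-contained sketch of just that pattern — a toy `refresher` type, not the vendored `ServicePrincipalToken`:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// refresher distills the double-checked locking used below: concurrent
// callers may all observe an expiring token, but only the first one to win
// the write lock performs the refresh; the rest see the second check fail.
type refresher struct {
	mu        sync.RWMutex
	expiresAt time.Time
	refreshes int
}

func (r *refresher) expiringSoon(window time.Duration) bool {
	return time.Until(r.expiresAt) < window
}

func (r *refresher) ensureFresh(window time.Duration) {
	r.mu.RLock()
	soon := r.expiringSoon(window)
	r.mu.RUnlock()
	if !soon {
		return
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	// second check: another goroutine may have refreshed while we waited
	if r.expiringSoon(window) {
		r.expiresAt = time.Now().Add(time.Hour) // stand-in for a real refresh
		r.refreshes++
	}
}

func main() {
	r := &refresher{}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); r.ensureFresh(5 * time.Minute) }()
	}
	wg.Wait()
	fmt.Println("refreshes:", r.refreshes) // 1, despite 8 concurrent callers
}
```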
-func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - // must take the read lock when initially checking the token's expiration - if spt.inner.AutoRefresh && spt.Token().WillExpireIn(spt.inner.RefreshWithin) { - // take the write lock then check again to see if the token was already refreshed - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - return spt.refreshInternal(ctx, spt.inner.Resource) - } - } - return nil -} - -// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization -func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { - if spt.refreshCallbacks != nil { - for _, callback := range spt.refreshCallbacks { - err := callback(spt.inner.Token) - if err != nil { - return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) - } - } - } - return nil -} - -// Refresh obtains a fresh token for the Service Principal. -// This method is safe for concurrent use. -func (spt *ServicePrincipalToken) Refresh() error { - return spt.RefreshWithContext(context.Background()) -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -// This method is safe for concurrent use. -func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, spt.inner.Resource) -} - -// RefreshExchange refreshes the token, but for a different resource. -// This method is safe for concurrent use. -func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { - return spt.RefreshExchangeWithContext(context.Background(), resource) -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. -// This method is safe for concurrent use. -func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, resource) -} - -func (spt *ServicePrincipalToken) getGrantType() string { - switch spt.inner.Secret.(type) { - case *ServicePrincipalUsernamePasswordSecret: - return OAuthGrantTypeUserPass - case *ServicePrincipalAuthorizationCodeSecret: - return OAuthGrantTypeAuthorizationCode - default: - return OAuthGrantTypeClientCredentials - } -} - -func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { - if spt.customRefreshFunc != nil { - token, err := spt.customRefreshFunc(ctx, resource) - if err != nil { - return err - } - spt.inner.Token = *token - return spt.InvokeRefreshCallbacks(spt.inner.Token) - } - req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) - if err != nil { - return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) - } - req.Header.Add("User-Agent", UserAgent()) - req = req.WithContext(ctx) - var resp *http.Response - authBodyFilter := func(b []byte) []byte { - if logger.Level() != logger.LogAuth { - return []byte("**REDACTED** authentication body") - } - return b - } - if msiSecret, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { - switch msiSecret.msiType { - case msiTypeAppServiceV20170901: - req.Method = http.MethodGet - req.Header.Set("secret", os.Getenv(msiSecretEnv)) - break - case msiTypeCloudShell: - req.Header.Set("Metadata", "true") - data := url.Values{} - data.Set("resource", spt.inner.Resource) - if spt.inner.ClientID != "" { - data.Set("client_id", spt.inner.ClientID) - } else if msiSecret.clientResourceID != "" { - data.Set("msi_res_id", msiSecret.clientResourceID) - } - req.Body = ioutil.NopCloser(strings.NewReader(data.Encode())) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - break - case msiTypeIMDS: - req.Method = http.MethodGet - req.Header.Set("Metadata", "true") - break - } - logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter}) - resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) - } else { - v := url.Values{} - v.Set("client_id", spt.inner.ClientID) - v.Set("resource", resource) - - if spt.inner.Token.RefreshToken != "" { - v.Set("grant_type", OAuthGrantTypeRefreshToken) - v.Set("refresh_token", spt.inner.Token.RefreshToken) - // web apps must specify client_secret when refreshing tokens - // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens - if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - } else { - v.Set("grant_type", spt.getGrantType()) - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - req.Body = body - logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter}) - resp, err = spt.sender.Do(req) - } - - // don't return a TokenRefreshError here; this will allow retry logic to apply - if err != nil { - return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) - } else if resp == nil { - return fmt.Errorf("adal: received nil response and error") - } - - logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter}) - defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != http.StatusOK { - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v Endpoint %s", resp.StatusCode, err, req.URL.String()), resp) - } - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s Endpoint %s", resp.StatusCode, string(rb), req.URL.String()), resp) - } - - // for the following error cases don't return a TokenRefreshError. the operation succeeded - // but some transient failure happened during deserialization. by returning a generic error - // the retry logic will kick in (we don't retry on TokenRefreshError). - - if err != nil { - return fmt.Errorf("adal: Failed to read a new service principal token during refresh. 
Error = '%v'", err) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return fmt.Errorf("adal: Empty service principal token received during refresh") - } - token := struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - - // AAD returns expires_in as a string, ADFS returns it as an int - ExpiresIn json.Number `json:"expires_in"` - // expires_on can be in three formats, a UTC time stamp, or the number of seconds as a string *or* int. - ExpiresOn interface{} `json:"expires_on"` - NotBefore json.Number `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` - }{} - // return a TokenRefreshError in the follow error cases as the token is in an unexpected format - err = json.Unmarshal(rb, &token) - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)), resp) - } - expiresOn := json.Number("") - // ADFS doesn't include the expires_on field - if token.ExpiresOn != nil { - if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp) - } - } - spt.inner.Token.AccessToken = token.AccessToken - spt.inner.Token.RefreshToken = token.RefreshToken - spt.inner.Token.ExpiresIn = token.ExpiresIn - spt.inner.Token.ExpiresOn = expiresOn - spt.inner.Token.NotBefore = token.NotBefore - spt.inner.Token.Resource = token.Resource - spt.inner.Token.Type = token.Type - - return spt.InvokeRefreshCallbacks(spt.inner.Token) -} - -// converts expires_on to the number of seconds -func parseExpiresOn(s interface{}) (json.Number, error) { - // the JSON unmarshaler treats JSON numbers unmarshaled into an interface{} as float64 - asFloat64, ok := s.(float64) - if ok { - // this is the number of seconds as int case - return json.Number(strconv.FormatInt(int64(asFloat64), 10)), nil - } - asStr, ok := s.(string) - if !ok { - return "", fmt.Errorf("unexpected expires_on type %T", s) - } - // convert the expiration date to the number of seconds from the unix epoch - timeToDuration := func(t time.Time) json.Number { - return json.Number(strconv.FormatInt(t.UTC().Unix(), 10)) - } - if _, err := json.Number(asStr).Int64(); err == nil { - // this is the number of seconds case, no conversion required - return json.Number(asStr), nil - } else if eo, err := time.Parse(expiresOnDateFormatPM, asStr); err == nil { - return timeToDuration(eo), nil - } else if eo, err := time.Parse(expiresOnDateFormat, asStr); err == nil { - return timeToDuration(eo), nil - } else { - // unknown format - return json.Number(""), err - } -} - -// retry logic specific to retrieving a token from the IMDS endpoint -func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { - // copied from client.go due to circular dependency - retries := []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } - // extra retry status codes specific to IMDS - retries = append(retries, - http.StatusNotFound, - http.StatusGone, - // all remaining 5xx - http.StatusNotImplemented, - http.StatusHTTPVersionNotSupported, - http.StatusVariantAlsoNegotiates, - http.StatusInsufficientStorage, - http.StatusLoopDetected, - http.StatusNotExtended, - 
http.StatusNetworkAuthenticationRequired) - - // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance - - const maxDelay time.Duration = 60 * time.Second - - attempt := 0 - delay := time.Duration(0) - - // maxAttempts is user-specified, ensure that its value is greater than zero else no request will be made - if maxAttempts < 1 { - maxAttempts = defaultMaxMSIRefreshAttempts - } - - for attempt < maxAttempts { - if resp != nil && resp.Body != nil { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - } - resp, err = sender.Do(req) - // we want to retry if err is not nil or the status code is in the list of retry codes - if err == nil && !responseHasStatusCode(resp, retries...) { - return - } - - // perform exponential backoff with a cap. - // must increment attempt before calculating delay. - attempt++ - // the base value of 2 is the "delta backoff" as specified in the guidance doc - delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) - if delay > maxDelay { - delay = maxDelay - } - - select { - case <-time.After(delay): - // intentionally left blank - case <-req.Context().Done(): - err = req.Context().Err() - return - } - } - return -} - -func responseHasStatusCode(resp *http.Response, codes ...int) bool { - if resp != nil { - for _, i := range codes { - if i == resp.StatusCode { - return true - } - } - } - return false -} - -// SetAutoRefresh enables or disables automatic refreshing of stale tokens. -func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { - spt.inner.AutoRefresh = autoRefresh -} - -// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will -// refresh the token. -func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { - spt.inner.RefreshWithin = d - return -} - -// SetSender sets the http.Client used when obtaining the Service Principal token. An -// undecorated http.Client is used by default. -func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } - -// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. -func (spt *ServicePrincipalToken) OAuthToken() string { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token.OAuthToken() -} - -// Token returns a copy of the current token. -func (spt *ServicePrincipalToken) Token() Token { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token -} - -// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. -type MultiTenantServicePrincipalToken struct { - PrimaryToken *ServicePrincipalToken - AuxiliaryTokens []*ServicePrincipalToken -} - -// PrimaryOAuthToken returns the primary authorization token. -func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { - return mt.PrimaryToken.OAuthToken() -} - -// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. -func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { - tokens := make([]string, len(mt.AuxiliaryTokens)) - for i := range mt.AuxiliaryTokens { - tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() - } - return tokens -} - -// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. 
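The cumulative-delay arithmetic in `retryForIMDS` above is easy to misjudge, since the delay accumulates rather than resets per attempt. This sketch reproduces only that calculation so the schedule can be inspected; the 60-second cap and the 2^attempt "delta backoff" come straight from the deleted loop:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// backoffSchedule reproduces the delay arithmetic from the deleted
// retryForIMDS: the cumulative delay grows by 2^attempt seconds after each
// failed attempt and is capped at 60s.
func backoffSchedule(maxAttempts int) []time.Duration {
	const maxDelay = 60 * time.Second
	var out []time.Duration
	delay := time.Duration(0)
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		delay += time.Duration(math.Pow(2, float64(attempt))) * time.Second
		if delay > maxDelay {
			delay = maxDelay
		}
		out = append(out, delay)
	}
	return out
}

func main() {
	fmt.Println(backoffSchedule(5)) // [2s 6s 14s 30s 1m0s]
}
```

Note that the cap bites quickly: five failed attempts already saturate at the one-minute ceiling.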
-func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(secret, "secret"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - auxTenants := multiTenantCfg.AuxiliaryTenants() - m := MultiTenantServicePrincipalToken{ - AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), - } - primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) - } - m.PrimaryToken = primary - for i := range auxTenants { - aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) - } - m.AuxiliaryTokens[i] = aux - } - return &m, nil -} - -// NewMultiTenantServicePrincipalTokenFromCertificate creates a new MultiTenantServicePrincipalToken with the specified certificate credentials and resource. -func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTenantOAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string) (*MultiTenantServicePrincipalToken, error) { - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if certificate == nil { - return nil, fmt.Errorf("parameter 'certificate' cannot be nil") - } - if privateKey == nil { - return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") - } - auxTenants := multiTenantCfg.AuxiliaryTenants() - m := MultiTenantServicePrincipalToken{ - AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), - } - primary, err := NewServicePrincipalTokenWithSecret( - *multiTenantCfg.PrimaryTenant(), - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - ) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) - } - m.PrimaryToken = primary - for i := range auxTenants { - aux, err := NewServicePrincipalTokenWithSecret( - *auxTenants[i], - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - ) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) - } - m.AuxiliaryTokens[i] = aux - } - return &m, nil -} - -// MSIAvailable returns true if the MSI endpoint is available for authentication. 
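The certificate-based constructor above expects an already-parsed `*x509.Certificate` and `*rsa.PrivateKey`. A minimal sketch of getting from PEM bytes to those types, assuming a PKCS#1-encoded RSA key (a PKCS#8 key would need `x509.ParsePKCS8PrivateKey` instead); the inputs are illustrative:

```go
package main

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// loadCertAndKey decodes a PEM certificate and a PKCS#1 RSA private key of
// the kind NewMultiTenantServicePrincipalTokenFromCertificate expects.
func loadCertAndKey(certPEM, keyPEM []byte) (*x509.Certificate, *rsa.PrivateKey, error) {
	cb, _ := pem.Decode(certPEM)
	if cb == nil {
		return nil, nil, fmt.Errorf("no certificate PEM block found")
	}
	cert, err := x509.ParseCertificate(cb.Bytes)
	if err != nil {
		return nil, nil, err
	}
	kb, _ := pem.Decode(keyPEM)
	if kb == nil {
		return nil, nil, fmt.Errorf("no key PEM block found")
	}
	key, err := x509.ParsePKCS1PrivateKey(kb.Bytes)
	if err != nil {
		return nil, nil, err
	}
	return cert, key, nil
}

func main() {
	// with empty input the error path is exercised
	if _, _, err := loadCertAndKey(nil, nil); err != nil {
		fmt.Println("expected error:", err)
	}
}
```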
-func MSIAvailable(ctx context.Context, s Sender) bool { - msiType, _, err := getMSIType() - - if err != nil { - return false - } - - if msiType != msiTypeIMDS { - return true - } - - if s == nil { - s = sender() - } - - resp, err := getMSIEndpoint(ctx, s) - - if err == nil { - resp.Body.Close() - } - - return err == nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go deleted file mode 100644 index 89190a4213..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build go1.13 -// +build go1.13 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adal - -import ( - "context" - "fmt" - "net/http" - "time" -) - -func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { - tempCtx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - // http.NewRequestWithContext() was added in Go 1.13 - req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil) - q := req.URL.Query() - q.Add("api-version", msiAPIVersion) - req.URL.RawQuery = q.Encode() - return sender.Do(req) -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh primary token: %w", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.EnsureFreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %w", err) - } - } - return nil -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh primary token: %w", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.RefreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %w", err) - } - } - return nil -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. 
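`MSIAvailable` above, together with the `getMSIEndpoint` helper deleted alongside it, amounts to a cheap reachability probe: any response at all counts as available, and a deliberately short timeout keeps the check fast off-Azure. A standalone approximation, assuming the documented IMDS address and the `2018-02-01` API version (the vendored `msiAPIVersion` constant isn't shown in this hunk):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// probeIMDS mirrors the deleted getMSIEndpoint/MSIAvailable pair: GET the
// IMDS token endpoint with a 2-second timeout and treat any transport error
// as "not available". Any HTTP response, even an error status, counts.
func probeIMDS(ctx context.Context) bool {
	ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet,
		"http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01", nil)
	if err != nil {
		return false
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false
	}
	resp.Body.Close()
	return true
}

func main() {
	fmt.Println("IMDS reachable:", probeIMDS(context.Background()))
}
```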
-func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { - return fmt.Errorf("failed to refresh primary token: %w", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %w", err) - } - } - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go deleted file mode 100644 index 27ec4efad7..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go +++ /dev/null @@ -1,75 +0,0 @@ -//go:build !go1.13 -// +build !go1.13 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adal - -import ( - "context" - "net/http" - "time" -) - -func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { - tempCtx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil) - req = req.WithContext(tempCtx) - q := req.URL.Query() - q.Add("api-version", msiAPIVersion) - req.URL.RawQuery = q.Encode() - return sender.Do(req) -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { - return err - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.EnsureFreshWithContext(ctx); err != nil { - return err - } - } - return nil -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { - return err - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.RefreshWithContext(ctx); err != nil { - return err - } - } - return nil -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. 
-func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { - return err - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go deleted file mode 100644 index c867b34843..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go +++ /dev/null @@ -1,45 +0,0 @@ -package adal - -import ( - "fmt" - "runtime" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -const number = "v1.0.0" - -var ( - ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - number, - ) -) - -// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. -func UserAgent() string { - return ua -} - -// AddToUserAgent adds an extension to the current user agent -func AddToUserAgent(extension string) error { - if extension != "" { - ua = fmt.Sprintf("%s %s", ua, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go deleted file mode 100644 index 1226c41115..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ /dev/null @@ -1,353 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "crypto/tls" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/Azure/go-autorest/autorest/adal" -) - -const ( - bearerChallengeHeader = "Www-Authenticate" - bearer = "Bearer" - tenantID = "tenantID" - apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" - bingAPISdkHeader = "X-BingApis-SDK-Client" - golangBingAPISdkHeaderValue = "Go-SDK" - authorization = "Authorization" - basic = "Basic" -) - -// Authorizer is the interface that provides a PrepareDecorator used to supply request -// authorization. Most often, the Authorizer decorator runs last so it has access to the full -// state of the formed HTTP request. 
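The `version.go` file removed above derives the adal User-Agent from the Go runtime at init time. A self-contained reproduction of that format and of `AddToUserAgent`'s empty-extension rule (the module version string here is illustrative):

```go
package main

import (
	"fmt"
	"runtime"
)

// ua follows the deleted version.go format: Go version, architecture, OS,
// then the adal module version ("v1.0.0" is a stand-in).
var ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s",
	runtime.Version(), runtime.GOARCH, runtime.GOOS, "v1.0.0")

// addToUserAgent rejects empty extensions and otherwise appends with a space,
// matching the deleted AddToUserAgent's behaviour.
func addToUserAgent(extension string) error {
	if extension == "" {
		return fmt.Errorf("extension was empty, user agent remained as '%s'", ua)
	}
	ua = fmt.Sprintf("%s %s", ua, extension)
	return nil
}

func main() {
	_ = addToUserAgent("ko/dev") // hypothetical extension
	fmt.Println(ua)
}
```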
-type Authorizer interface { - WithAuthorization() PrepareDecorator -} - -// NullAuthorizer implements a default, "do nothing" Authorizer. -type NullAuthorizer struct{} - -// WithAuthorization returns a PrepareDecorator that does nothing. -func (na NullAuthorizer) WithAuthorization() PrepareDecorator { - return WithNothing() -} - -// APIKeyAuthorizer implements API Key authorization. -type APIKeyAuthorizer struct { - headers map[string]interface{} - queryParameters map[string]interface{} -} - -// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers. -func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer { - return NewAPIKeyAuthorizer(headers, nil) -} - -// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters. -func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer { - return NewAPIKeyAuthorizer(nil, queryParameters) -} - -// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers. -func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer { - return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters. -func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) - } -} - -// CognitiveServicesAuthorizer implements authorization for Cognitive Services. -type CognitiveServicesAuthorizer struct { - subscriptionKey string -} - -// NewCognitiveServicesAuthorizer is -func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer { - return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey} -} - -// WithAuthorization is -func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator { - headers := make(map[string]interface{}) - headers[apiKeyAuthorizerHeader] = csa.subscriptionKey - headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue - - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// BearerAuthorizer implements the bearer authorization -type BearerAuthorizer struct { - tokenProvider adal.OAuthTokenProvider -} - -// NewBearerAuthorizer crates a BearerAuthorizer using the given token provider -func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { - return &BearerAuthorizer{tokenProvider: tp} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the token. -// -// By default, the token will be automatically refreshed through the Refresher interface. 
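The `WithAuthorization` implementation that follows prefers `RefresherWithContext` over plain `Refresher` via type assertions before stamping the header. This sketch isolates that assertion ladder with stand-in local interfaces — `tokenProvider`, `refresher`, and `refresherWithContext` are illustrative names, not the adal types:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

type tokenProvider interface{ OAuthToken() string }

type refresher interface{ EnsureFresh() error }

type refresherWithContext interface {
	EnsureFreshWithContext(ctx context.Context) error
}

// authorize mirrors the ordering below: the context-aware refresher is
// preferred when the provider implements it, then the plain one; only after
// a successful refresh is the Authorization header set.
func authorize(r *http.Request, tp tokenProvider) error {
	var err error
	switch t := tp.(type) {
	case refresherWithContext: // preferred: honours the request's context
		err = t.EnsureFreshWithContext(r.Context())
	case refresher:
		err = t.EnsureFresh()
	}
	if err != nil {
		return err
	}
	r.Header.Set("Authorization", "Bearer "+tp.OAuthToken())
	return nil
}

// staticToken implements neither refresher interface, so authorize skips
// straight to setting the header.
type staticToken string

func (s staticToken) OAuthToken() string { return string(s) }

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	if err := authorize(req, staticToken("token-value")); err == nil {
		fmt.Println(req.Header.Get("Authorization"))
	}
}
```

The interface-upgrade pattern keeps older token providers working while letting newer ones honor request cancellation.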
-func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - // the ordering is important here, prefer RefresherWithContext if available - if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { - err = refresher.EnsureFreshWithContext(r.Context()) - } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { - err = refresher.EnsureFresh() - } - if err != nil { - var resp *http.Response - if tokError, ok := err.(adal.TokenRefreshError); ok { - resp = tokError.Response() - } - return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, - "Failed to refresh the Token for request to %s", r.URL) - } - return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) - } - return r, err - }) - } -} - -// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST. -func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider { - return ba.tokenProvider -} - -// BearerAuthorizerCallbackFunc is the authentication callback signature. -type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) - -// BearerAuthorizerCallback implements bearer authorization via a callback. -type BearerAuthorizerCallback struct { - sender Sender - callback BearerAuthorizerCallbackFunc -} - -// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback -// is invoked when the HTTP request is submitted. -func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { - if s == nil { - s = sender(tls.RenegotiateNever) - } - return &BearerAuthorizerCallback{sender: s, callback: callback} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value -// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - // make a copy of the request and remove the body as it's not - // required and avoids us having to create a copy of it. 
- rCopy := *r - removeRequestBody(&rCopy) - - resp, err := bacb.sender.Do(&rCopy) - if err != nil { - return r, err - } - DrainResponseBody(resp) - if resp.StatusCode == 401 && hasBearerChallenge(resp.Header) { - bc, err := newBearerChallenge(resp.Header) - if err != nil { - return r, err - } - if bacb.callback != nil { - ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) - if err != nil { - return r, err - } - return Prepare(r, ba.WithAuthorization()) - } - } - } - return r, err - }) - } -} - -// returns true if the HTTP response contains a bearer challenge -func hasBearerChallenge(header http.Header) bool { - authHeader := header.Get(bearerChallengeHeader) - if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { - return false - } - return true -} - -type bearerChallenge struct { - values map[string]string -} - -func newBearerChallenge(header http.Header) (bc bearerChallenge, err error) { - challenge := strings.TrimSpace(header.Get(bearerChallengeHeader)) - trimmedChallenge := challenge[len(bearer)+1:] - - // challenge is a set of key=value pairs that are comma delimited - pairs := strings.Split(trimmedChallenge, ",") - if len(pairs) < 1 { - err = fmt.Errorf("challenge '%s' contains no pairs", challenge) - return bc, err - } - - bc.values = make(map[string]string) - for i := range pairs { - trimmedPair := strings.TrimSpace(pairs[i]) - pair := strings.Split(trimmedPair, "=") - if len(pair) == 2 { - // remove the enclosing quotes - key := strings.Trim(pair[0], "\"") - value := strings.Trim(pair[1], "\"") - - switch key { - case "authorization", "authorization_uri": - // strip the tenant ID from the authorization URL - asURL, err := url.Parse(value) - if err != nil { - return bc, err - } - bc.values[tenantID] = asURL.Path[1:] - default: - bc.values[key] = value - } - } - } - - return bc, err -} - -// EventGridKeyAuthorizer implements authorization for event grid using key authentication. -type EventGridKeyAuthorizer struct { - topicKey string -} - -// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer -// with the specified topic key. -func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { - return EventGridKeyAuthorizer{topicKey: topicKey} -} - -// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. -func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { - headers := map[string]interface{}{ - "aeg-sas-key": egta.topicKey, - } - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header -// with the value "Basic " where is a base64-encoded username:password tuple. -type BasicAuthorizer struct { - userName string - password string -} - -// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password. -func NewBasicAuthorizer(userName, password string) *BasicAuthorizer { - return &BasicAuthorizer{ - userName: userName, - password: password, - } -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Basic " followed by the base64-encoded username:password tuple. 
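The challenge parsing above (`newBearerChallenge`) is mostly string surgery over the `Www-Authenticate` header. A condensed standalone version, omitting the tenant-ID extraction from `authorization_uri` that the vendored code performs; the sample header is made up:

```go
package main

import (
	"fmt"
	"strings"
)

// parseBearerChallenge follows the spirit of the deleted newBearerChallenge:
// strip the "Bearer " prefix, split the remainder on commas into key="value"
// pairs, and trim the enclosing quotes.
func parseBearerChallenge(header string) map[string]string {
	trimmed := strings.TrimPrefix(strings.TrimSpace(header), "Bearer ")
	values := map[string]string{}
	for _, pair := range strings.Split(trimmed, ",") {
		kv := strings.SplitN(strings.TrimSpace(pair), "=", 2)
		if len(kv) == 2 {
			values[strings.Trim(kv[0], `"`)] = strings.Trim(kv[1], `"`)
		}
	}
	return values
}

func main() {
	h := `Bearer authorization_uri="https://login.microsoftonline.com/tenant-id", resource="https://management.azure.com/"`
	fmt.Println(parseBearerChallenge(h))
}
```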
-func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { - headers := make(map[string]interface{}) - headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) - - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants. -type MultiTenantServicePrincipalTokenAuthorizer interface { - WithAuthorization() PrepareDecorator -} - -// NewMultiTenantServicePrincipalTokenAuthorizer crates a BearerAuthorizer using the given token provider -func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer { - return NewMultiTenantBearerAuthorizer(tp) -} - -// MultiTenantBearerAuthorizer implements bearer authorization across multiple tenants. -type MultiTenantBearerAuthorizer struct { - tp adal.MultitenantOAuthTokenProvider -} - -// NewMultiTenantBearerAuthorizer creates a MultiTenantBearerAuthorizer using the given token provider. -func NewMultiTenantBearerAuthorizer(tp adal.MultitenantOAuthTokenProvider) *MultiTenantBearerAuthorizer { - return &MultiTenantBearerAuthorizer{tp: tp} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the -// primary token along with the auxiliary authorization header using the auxiliary tokens. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (mt *MultiTenantBearerAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - if refresher, ok := mt.tp.(adal.RefresherWithContext); ok { - err = refresher.EnsureFreshWithContext(r.Context()) - if err != nil { - var resp *http.Response - if tokError, ok := err.(adal.TokenRefreshError); ok { - resp = tokError.Response() - } - return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp, - "Failed to refresh one or more Tokens for request to %s", r.URL) - } - } - r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken()))) - if err != nil { - return r, err - } - auxTokens := mt.tp.AuxiliaryOAuthTokens() - for i := range auxTokens { - auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i]) - } - return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, ", "))) - }) - } -} - -// TokenProvider returns the underlying MultitenantOAuthTokenProvider for this authorizer. -func (mt *MultiTenantBearerAuthorizer) TokenProvider() adal.MultitenantOAuthTokenProvider { - return mt.tp -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go deleted file mode 100644 index 66501493bd..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go +++ /dev/null @@ -1,66 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" - "strings" -) - -// SASTokenAuthorizer implements an authorization for SAS Token Authentication -// this can be used for interaction with Blob Storage Endpoints -type SASTokenAuthorizer struct { - sasToken string -} - -// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials -func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) { - if strings.TrimSpace(sasToken) == "" { - return nil, fmt.Errorf("sasToken cannot be empty") - } - - token := sasToken - if strings.HasPrefix(sasToken, "?") { - token = strings.TrimPrefix(sasToken, "?") - } - - return &SASTokenAuthorizer{ - sasToken: token, - }, nil -} - -// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the -// URI's query parameters. This can be used for the Blob, Queue, and File Services. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature -func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - - if r.URL.RawQuery == "" { - r.URL.RawQuery = sas.sasToken - } else if !strings.Contains(r.URL.RawQuery, sas.sasToken) { - r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken) - } - - return Prepare(r) - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go deleted file mode 100644 index 2af5030a1c..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go +++ /dev/null @@ -1,307 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "sort" - "strings" - "time" -) - -// SharedKeyType defines the enumeration for the various shared key types. -// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types. -type SharedKeyType string - -const ( - // SharedKey is used to authorize against blobs, files and queues services. - SharedKey SharedKeyType = "sharedKey" - - // SharedKeyForTable is used to authorize against the table service. 
- SharedKeyForTable SharedKeyType = "sharedKeyTable" - - // SharedKeyLite is used to authorize against blobs, files and queues services. It's provided for - // backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead. - SharedKeyLite SharedKeyType = "sharedKeyLite" - - // SharedKeyLiteForTable is used to authorize against the table service. It's provided for - // backwards compatibility with older table API versions. Prefer SharedKeyForTable instead. - SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable" -) - -const ( - headerAccept = "Accept" - headerAcceptCharset = "Accept-Charset" - headerContentEncoding = "Content-Encoding" - headerContentLength = "Content-Length" - headerContentMD5 = "Content-MD5" - headerContentLanguage = "Content-Language" - headerIfModifiedSince = "If-Modified-Since" - headerIfMatch = "If-Match" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerDate = "Date" - headerXMSDate = "X-Ms-Date" - headerXMSVersion = "x-ms-version" - headerRange = "Range" -) - -const storageEmulatorAccountName = "devstoreaccount1" - -// SharedKeyAuthorizer implements an authorization for Shared Key -// this can be used for interaction with Blob, File and Queue Storage Endpoints -type SharedKeyAuthorizer struct { - accountName string - accountKey []byte - keyType SharedKeyType -} - -// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type. -func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) { - key, err := base64.StdEncoding.DecodeString(accountKey) - if err != nil { - return nil, fmt.Errorf("malformed storage account key: %v", err) - } - return &SharedKeyAuthorizer{ - accountName: accountName, - accountKey: key, - keyType: keyType, - }, nil -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is " " followed by the computed key. -// This can be used for the Blob, Queue, and File Services -// -// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key -// You may use Shared Key authorization to authorize a request made against the -// 2009-09-19 version and later of the Blob and Queue services, -// and version 2014-02-14 and later of the File services. 
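The flow that follows ultimately ends, further down in this file, in an HMAC-SHA256 over a canonicalized string. This sketch condenses the base64 key decoding above with that signing step into one function; the account name, key, and canonicalized string are illustrative:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// signSharedKey condenses NewSharedKeyAuthorizer plus the
// createAuthorizationHeader step that appears later in this file:
// base64-decode the account key, HMAC-SHA256 the canonicalized string, and
// emit the "SharedKey <account>:<signature>" header value.
func signSharedKey(accountName, accountKeyB64, canonicalized string) (string, error) {
	key, err := base64.StdEncoding.DecodeString(accountKeyB64)
	if err != nil {
		return "", fmt.Errorf("malformed storage account key: %v", err)
	}
	h := hmac.New(sha256.New, key)
	h.Write([]byte(canonicalized))
	sig := base64.StdEncoding.EncodeToString(h.Sum(nil))
	return fmt.Sprintf("SharedKey %s:%s", accountName, sig), nil
}

func main() {
	v, err := signSharedKey("devstoreaccount1",
		base64.StdEncoding.EncodeToString([]byte("not-a-real-key")),
		"GET\n\n\n\n\n\n\n\n\n\n\n\n\n/devstoreaccount1/container")
	fmt.Println(v, err)
}
```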
-func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - - sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType) - if err != nil { - return r, err - } - return Prepare(r, WithHeader(headerAuthorization, sk)) - }) - } -} - -func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) { - canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType) - if err != nil { - return "", err - } - - if req.Header == nil { - req.Header = http.Header{} - } - - // ensure date is set - if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" { - date := time.Now().UTC().Format(http.TimeFormat) - req.Header.Set(headerXMSDate, date) - } - canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType) - if err != nil { - return "", err - } - return createAuthorizationHeader(accName, accKey, canString, keyType), nil -} - -func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) { - errMsg := "buildCanonicalizedResource error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := bytes.NewBufferString("") - if accountName != storageEmulatorAccountName { - cr.WriteString("/") - cr.WriteString(getCanonicalizedAccountName(accountName)) - } - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it is in the URI. - // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx - cr.WriteString(u.EscapedPath()) - } else { - // a slash is required to indicate the root path - cr.WriteString("/") - } - - params, err := url.ParseQuery(u.RawQuery) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 - if keyType == SharedKey { - if len(params) > 0 { - cr.WriteString("\n") - - keys := []string{} - for key := range params { - keys = append(keys, key) - } - sort.Strings(keys) - - completeParams := []string{} - for _, key := range keys { - if len(params[key]) > 1 { - sort.Strings(params[key]) - } - - completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) - } - cr.WriteString(strings.Join(completeParams, "\n")) - } - } else { - // search for "comp" parameter, if exists then add it to canonicalizedresource - if v, ok := params["comp"]; ok { - cr.WriteString("?comp=" + v[0]) - } - } - - return string(cr.Bytes()), nil -} - -func getCanonicalizedAccountName(accountName string) string { - // since we may be trying to access a secondary storage account, we need to - // remove the -secondary part of the storage name - return strings.TrimSuffix(accountName, "-secondary") -} - -func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) { - contentLength := headers.Get(headerContentLength) - if contentLength == "0" { - contentLength = "" - } - date := headers.Get(headerDate) - if v := headers.Get(headerXMSDate); v != "" { - if keyType == SharedKey || keyType == SharedKeyLite { - date = "" - } else { - date = v - } - } - var canString string - switch keyType { - case SharedKey: - canString = 
strings.Join([]string{ - verb, - headers.Get(headerContentEncoding), - headers.Get(headerContentLanguage), - contentLength, - headers.Get(headerContentMD5), - headers.Get(headerContentType), - date, - headers.Get(headerIfModifiedSince), - headers.Get(headerIfMatch), - headers.Get(headerIfNoneMatch), - headers.Get(headerIfUnmodifiedSince), - headers.Get(headerRange), - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - case SharedKeyForTable: - canString = strings.Join([]string{ - verb, - headers.Get(headerContentMD5), - headers.Get(headerContentType), - date, - canonicalizedResource, - }, "\n") - case SharedKeyLite: - canString = strings.Join([]string{ - verb, - headers.Get(headerContentMD5), - headers.Get(headerContentType), - date, - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - case SharedKeyLiteForTable: - canString = strings.Join([]string{ - date, - canonicalizedResource, - }, "\n") - default: - return "", fmt.Errorf("key type '%s' is not supported", keyType) - } - return canString, nil -} - -func buildCanonicalizedHeader(headers http.Header) string { - cm := make(map[string]string) - - for k := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - if strings.HasPrefix(headerName, "x-ms-") { - cm[headerName] = headers.Get(k) - } - } - - if len(cm) == 0 { - return "" - } - - keys := []string{} - for key := range cm { - keys = append(keys, key) - } - - sort.Strings(keys) - - ch := bytes.NewBufferString("") - - for _, key := range keys { - ch.WriteString(key) - ch.WriteRune(':') - ch.WriteString(cm[key]) - ch.WriteRune('\n') - } - - return strings.TrimSuffix(string(ch.Bytes()), "\n") -} - -func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string { - h := hmac.New(sha256.New, accountKey) - h.Write([]byte(canonicalizedString)) - signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) - var key string - switch keyType { - case SharedKey, SharedKeyForTable: - key = "SharedKey" - case SharedKeyLite, SharedKeyLiteForTable: - key = "SharedKeyLite" - } - return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go deleted file mode 100644 index 211c98d1ed..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines -and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) -generated Go code. - -The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, -and Responding. A typical pattern is: - - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) - - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) - - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) - -Each phase relies on decorators to modify and / or manage processing. Decorators may first modify -and then pass the data along, pass the data first and then modify the result, or wrap themselves -around passing the data (such as a logger might do). Decorators run in the order provided. 
For -example, the following: - - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) - -will set the URL to: - - https://microsoft.com/a/b/c - -Preparers and Responders may be shared and re-used (assuming the underlying decorators support -sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders -shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, -all bound together by means of input / output channels. - -Decorators hold their passed state within a closure (such as the path components in the example -above). Be careful to share Preparers and Responders only in a context where such held state -applies. For example, it may not make sense to share a Preparer that applies a query string from a -fixed set of values. Similarly, sharing a Responder that reads the response body into a passed -struct (e.g., ByUnmarshallingJson) is likely incorrect. - -Lastly, the Swagger specification (https://swagger.io) that drives AutoRest -(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The -github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure -correct parsing and formatting. - -Errors raised by autorest objects and methods will conform to the autorest.Error interface. - -See the included examples for more detail. For details on the suggested use of this package by -generated clients, see the Client described below. -*/ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "net/http" - "time" -) - -const ( - // HeaderLocation specifies the HTTP Location header. - HeaderLocation = "Location" - - // HeaderRetryAfter specifies the HTTP Retry-After header. - HeaderRetryAfter = "Retry-After" -) - -// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set -// and false otherwise. -func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { - if resp == nil { - return false - } - return containsInt(codes, resp.StatusCode) -} - -// GetLocation retrieves the URL from the Location header of the passed response. -func GetLocation(resp *http.Response) string { - return resp.Header.Get(HeaderLocation) -} - -// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If -// the header is absent or is malformed, it will return the supplied default delay time.Duration. -func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { - retry := resp.Header.Get(HeaderRetryAfter) - if retry == "" { - return defaultDelay - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - return defaultDelay - } - - return d -} - -// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
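`GetRetryAfter` above leans on a small trick: the Retry-After header carries a bare number of seconds, so appending `"s"` turns it into a string `time.ParseDuration` accepts. Reproduced standalone, with the same fall-back-to-default behaviour for missing or malformed headers:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// retryAfter mirrors the deleted GetRetryAfter: parse Retry-After as a
// number of seconds, or return the supplied default when the header is
// absent or malformed.
func retryAfter(resp *http.Response, def time.Duration) time.Duration {
	v := resp.Header.Get("Retry-After")
	if v == "" {
		return def
	}
	d, err := time.ParseDuration(v + "s")
	if err != nil {
		return def
	}
	return d
}

func main() {
	resp := &http.Response{Header: http.Header{"Retry-After": []string{"30"}}}
	fmt.Println(retryAfter(resp, 5*time.Second)) // 30s
}
```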
-func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare(&http.Request{Cancel: cancel}, - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} - -// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. -func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare((&http.Request{}).WithContext(ctx), - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go deleted file mode 100644 index 45575eedbf..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ /dev/null @@ -1,995 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/logger" - "github.com/Azure/go-autorest/tracing" -) - -const ( - headerAsyncOperation = "Azure-AsyncOperation" -) - -const ( - operationInProgress string = "InProgress" - operationCanceled string = "Canceled" - operationFailed string = "Failed" - operationSucceeded string = "Succeeded" -) - -var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} - -// FutureAPI contains the set of methods on the Future type. -type FutureAPI interface { - // Response returns the last HTTP response. - Response() *http.Response - - // Status returns the last status message of the operation. - Status() string - - // PollingMethod returns the method used to monitor the status of the asynchronous operation. - PollingMethod() PollingMethodType - - // DoneWithContext queries the service to see if the operation has completed. 
- DoneWithContext(context.Context, autorest.Sender) (bool, error) - - // GetPollingDelay returns a duration the application should wait before checking - // the status of the asynchronous request and true; this value is returned from - // the service via the Retry-After response header. If the header wasn't returned - // then the function returns the zero-value time.Duration and false. - GetPollingDelay() (time.Duration, bool) - - // WaitForCompletionRef will return when one of the following conditions is met: the long - // running operation has completed, the provided context is cancelled, or the client's - // polling duration has been exceeded. It will retry failed polling attempts based on - // the retry value defined in the client up to the maximum retry attempts. - // If no deadline is specified in the context then the client.PollingDuration will be - // used to determine if a default deadline should be used. - // If PollingDuration is greater than zero the value will be used as the context's timeout. - // If PollingDuration is zero then no default deadline will be used. - WaitForCompletionRef(context.Context, autorest.Client) error - - // MarshalJSON implements the json.Marshaler interface. - MarshalJSON() ([]byte, error) - - // UnmarshalJSON implements the json.Unmarshaler interface. - UnmarshalJSON([]byte) error - - // PollingURL returns the URL used for retrieving the status of the long-running operation. - PollingURL() string - - // GetResult should be called once polling has completed successfully. - // It makes the final GET call to retrieve the resultant payload. - GetResult(autorest.Sender) (*http.Response, error) -} - -var _ FutureAPI = (*Future)(nil) - -// Future provides a mechanism to access the status and results of an asynchronous request. -// Since futures are stateful they should be passed by value to avoid race conditions. -type Future struct { - pt pollingTracker -} - -// NewFutureFromResponse returns a new Future object initialized -// with the initial response from an asynchronous operation. -func NewFutureFromResponse(resp *http.Response) (Future, error) { - pt, err := createPollingTracker(resp) - return Future{pt: pt}, err -} - -// Response returns the last HTTP response. -func (f Future) Response() *http.Response { - if f.pt == nil { - return nil - } - return f.pt.latestResponse() -} - -// Status returns the last status message of the operation. -func (f Future) Status() string { - if f.pt == nil { - return "" - } - return f.pt.pollingStatus() -} - -// PollingMethod returns the method used to monitor the status of the asynchronous operation. -func (f Future) PollingMethod() PollingMethodType { - if f.pt == nil { - return PollingUnknown - } - return f.pt.pollingMethod() -} - -// DoneWithContext queries the service to see if the operation has completed.
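Before the implementation below, a hedged sketch of driving this contract by hand instead of via `WaitForCompletionRef`; it assumes the canonical `github.com/Azure/go-autorest` import paths, and the ten-second fallback delay is an arbitrary choice, not part of the API:

```go
package lropoll

import (
	"context"
	"time"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// pollUntilDone drives a FutureAPI manually: check, honor Retry-After, repeat.
// The retry budgeting and tracing that WaitForCompletionRef adds are omitted.
func pollUntilDone(ctx context.Context, f azure.FutureAPI, sender autorest.Sender) error {
	for {
		done, err := f.DoneWithContext(ctx, sender)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		delay, ok := f.GetPollingDelay() // the service's Retry-After hint, when present
		if !ok {
			delay = 10 * time.Second // assumed fallback, not part of the API
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(delay):
		}
	}
}
```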
-func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { - ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") - defer func() { - sc := -1 - resp := f.Response() - if resp != nil { - sc = resp.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - - if f.pt == nil { - return false, autorest.NewError("Future", "Done", "future is not initialized") - } - if f.pt.hasTerminated() { - return true, f.pt.pollingError() - } - if err := f.pt.pollForStatus(ctx, sender); err != nil { - return false, err - } - if err := f.pt.checkForErrors(); err != nil { - return f.pt.hasTerminated(), err - } - if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { - return false, err - } - if err := f.pt.initPollingMethod(); err != nil { - return false, err - } - if err := f.pt.updatePollingMethod(); err != nil { - return false, err - } - return f.pt.hasTerminated(), f.pt.pollingError() -} - -// GetPollingDelay returns a duration the application should wait before checking -// the status of the asynchronous request and true; this value is returned from -// the service via the Retry-After response header. If the header wasn't returned -// then the function returns the zero-value time.Duration and false. -func (f Future) GetPollingDelay() (time.Duration, bool) { - if f.pt == nil { - return 0, false - } - resp := f.pt.latestResponse() - if resp == nil { - return 0, false - } - - retry := resp.Header.Get(autorest.HeaderRetryAfter) - if retry == "" { - return 0, false - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - panic(err) - } - - return d, true -} - -// WaitForCompletionRef will return when one of the following conditions is met: the long -// running operation has completed, the provided context is cancelled, or the client's -// polling duration has been exceeded. It will retry failed polling attempts based on -// the retry value defined in the client up to the maximum retry attempts. -// If no deadline is specified in the context then the client.PollingDuration will be -// used to determine if a default deadline should be used. -// If PollingDuration is greater than zero the value will be used as the context's timeout. -// If PollingDuration is zero then no default deadline will be used. 
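Before the implementation below, a usage sketch of the flow just described, from the service's initial response through the final payload; `completeLRO` is a hypothetical helper, and the import paths assume the canonical module:

```go
package lropoll

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// completeLRO is a hypothetical helper: resp must be the initial 200/201/202
// response that started the long-running operation.
func completeLRO(ctx context.Context, client autorest.Client, resp *http.Response) (*http.Response, error) {
	future, err := azure.NewFutureFromResponse(resp)
	if err != nil {
		return nil, err
	}
	// blocks until a terminal state, context cancellation, or polling-duration expiry
	if err := future.WaitForCompletionRef(ctx, client); err != nil {
		return nil, err
	}
	// final GET for the resultant payload (autorest.Client satisfies autorest.Sender)
	return future.GetResult(client)
}
```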
-func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { - ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") - defer func() { - sc := -1 - resp := f.Response() - if resp != nil { - sc = resp.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - cancelCtx := ctx - // if the provided context already has a deadline don't override it - _, hasDeadline := ctx.Deadline() - if d := client.PollingDuration; !hasDeadline && d != 0 { - var cancel context.CancelFunc - cancelCtx, cancel = context.WithTimeout(ctx, d) - defer cancel() - } - // if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll - if delay, ok := f.GetPollingDelay(); ok { - logger.Instance.Writeln(logger.LogInfo, "WaitForCompletionRef: initial polling delay") - if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed { - err = cancelCtx.Err() - return - } - } - done, err := f.DoneWithContext(ctx, client) - for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { - if attempts >= client.RetryAttempts { - return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") - } - // we want delayAttempt to be zero in the non-error case so - // that DelayForBackoff doesn't perform exponential back-off - var delayAttempt int - var delay time.Duration - if err == nil { - // check for Retry-After delay, if not present use the client's polling delay - var ok bool - delay, ok = f.GetPollingDelay() - if !ok { - logger.Instance.Writeln(logger.LogInfo, "WaitForCompletionRef: Using client polling delay") - delay = client.PollingDelay - } - } else { - // there was an error polling for status so perform exponential - // back-off based on the number of attempts using the client's retry - // duration. update attempts after delayAttempt to avoid off-by-one. - logger.Instance.Writef(logger.LogError, "WaitForCompletionRef: %s\n", err) - delayAttempt = attempts - delay = client.RetryDuration - attempts++ - } - // wait until the delay elapses or the context is cancelled - delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) - if !delayElapsed { - return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") - } - } - return -} - -// MarshalJSON implements the json.Marshaler interface. -func (f Future) MarshalJSON() ([]byte, error) { - return json.Marshal(f.pt) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
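Before the unmarshaling logic below, a sketch of why the JSON round-trip exists: a pending operation can be checkpointed to disk and resumed by a later process. The file handling here is illustrative, assuming the canonical import path:

```go
package lropoll

import (
	"encoding/json"
	"os"

	"github.com/Azure/go-autorest/autorest/azure"
)

// checkpoint persists the future's polling state (verb, URLs, last known status).
func checkpoint(f azure.Future, path string) error {
	b, err := json.Marshal(f) // delegates to Future.MarshalJSON
	if err != nil {
		return err
	}
	return os.WriteFile(path, b, 0o600)
}

// restore rebuilds a Future from a checkpoint; polling can then resume via
// DoneWithContext or WaitForCompletionRef in a fresh process.
func restore(path string) (azure.Future, error) {
	var f azure.Future
	b, err := os.ReadFile(path)
	if err != nil {
		return f, err
	}
	return f, json.Unmarshal(b, &f) // delegates to Future.UnmarshalJSON
}
```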
-func (f *Future) UnmarshalJSON(data []byte) error { - // unmarshal into JSON object to determine the tracker type - obj := map[string]interface{}{} - err := json.Unmarshal(data, &obj) - if err != nil { - return err - } - if obj["method"] == nil { - return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property") - } - method := obj["method"].(string) - switch strings.ToUpper(method) { - case http.MethodDelete: - f.pt = &pollingTrackerDelete{} - case http.MethodPatch: - f.pt = &pollingTrackerPatch{} - case http.MethodPost: - f.pt = &pollingTrackerPost{} - case http.MethodPut: - f.pt = &pollingTrackerPut{} - default: - return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method) - } - // now unmarshal into the tracker - return json.Unmarshal(data, &f.pt) -} - -// PollingURL returns the URL used for retrieving the status of the long-running operation. -func (f Future) PollingURL() string { - if f.pt == nil { - return "" - } - return f.pt.pollingURL() -} - -// GetResult should be called once polling has completed successfully. -// It makes the final GET call to retrieve the resultant payload. -func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { - if f.pt.finalGetURL() == "" { - // we can end up in this situation if the async operation returns a 200 - // with no polling URLs. in that case return the response which should - // contain the JSON payload (only do this for successful terminal cases). - if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { - return lr, nil - } - return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") - } - req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) - if err != nil { - return nil, err - } - resp, err := sender.Do(req) - if err == nil && resp.Body != nil { - // copy the body and close it so callers don't have to - defer resp.Body.Close() - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return resp, err - } - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - return resp, err -} - -type pollingTracker interface { - // these methods can differ per tracker - - // checks the response headers and status code to determine the polling mechanism - updatePollingMethod() error - - // checks the response for tracker-specific error conditions - checkForErrors() error - - // returns true if provisioning state should be checked - provisioningStateApplicable() bool - - // methods common to all trackers - - // initializes a tracker's polling URL and method, called for each iteration. - // these values can be overridden by each polling tracker as required.
- initPollingMethod() error - - // initializes the tracker's internal state, call this when the tracker is created - initializeState() error - - // makes an HTTP request to check the status of the LRO - pollForStatus(ctx context.Context, sender autorest.Sender) error - - // updates internal tracker state, call this after each call to pollForStatus - updatePollingState(provStateApl bool) error - - // returns the error response from the service, can be nil - pollingError() error - - // returns the polling method being used - pollingMethod() PollingMethodType - - // returns the state of the LRO as returned from the service - pollingStatus() string - - // returns the URL used for polling status - pollingURL() string - - // returns the URL used for the final GET to retrieve the resource - finalGetURL() string - - // returns true if the LRO is in a terminal state - hasTerminated() bool - - // returns true if the LRO is in a failed terminal state - hasFailed() bool - - // returns true if the LRO is in a successful terminal state - hasSucceeded() bool - - // returns the cached HTTP response after a call to pollForStatus(), can be nil - latestResponse() *http.Response -} - -type pollingTrackerBase struct { - // resp is the last response, either from the submission of the LRO or from polling - resp *http.Response - - // method is the HTTP verb, this is needed for deserialization - Method string `json:"method"` - - // rawBody is the raw JSON response body - rawBody map[string]interface{} - - // denotes if polling is using async-operation or location header - Pm PollingMethodType `json:"pollingMethod"` - - // the URL to poll for status - URI string `json:"pollingURI"` - - // the state of the LRO as returned from the service - State string `json:"lroState"` - - // the URL to GET for the final result - FinalGetURI string `json:"resultURI"` - - // used to hold an error object returned from the service - Err *ServiceError `json:"error,omitempty"` -} - -func (pt *pollingTrackerBase) initializeState() error { - // determine the initial polling state based on response body and/or HTTP status - // code. this is applicable to the initial LRO response, not polling responses! 
- pt.Method = pt.resp.Request.Method - if err := pt.updateRawBody(); err != nil { - return err - } - switch pt.resp.StatusCode { - case http.StatusOK: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - if pt.hasFailed() { - pt.updateErrorFromResponse() - return pt.pollingError() - } - } else { - pt.State = operationSucceeded - } - case http.StatusCreated: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationInProgress - } - case http.StatusAccepted: - pt.State = operationInProgress - case http.StatusNoContent: - pt.State = operationSucceeded - default: - pt.State = operationFailed - pt.updateErrorFromResponse() - return pt.pollingError() - } - return pt.initPollingMethod() -} - -func (pt pollingTrackerBase) getProvisioningState() *string { - if pt.rawBody != nil && pt.rawBody["properties"] != nil { - p := pt.rawBody["properties"].(map[string]interface{}) - if ps := p["provisioningState"]; ps != nil { - s := ps.(string) - return &s - } - } - return nil -} - -func (pt *pollingTrackerBase) updateRawBody() error { - pt.rawBody = map[string]interface{}{} - if pt.resp.ContentLength != 0 { - defer pt.resp.Body.Close() - b, err := ioutil.ReadAll(pt.resp.Body) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") - } - // put the body back so it's available to other callers - pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty - if len(b) == 0 { - return nil - } - if err = json.Unmarshal(b, &pt.rawBody); err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") - } - } - return nil -} - -func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { - req, err := http.NewRequest(http.MethodGet, pt.URI, nil) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") - } - - req = req.WithContext(ctx) - preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) - req, err = preparer.Prepare(req) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") - } - pt.resp, err = sender.Do(req) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") - } - if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { - // reset the service error on success case - pt.Err = nil - err = pt.updateRawBody() - } else { - // check response body for error content - pt.updateErrorFromResponse() - err = pt.pollingError() - } - return err -} - -// attempts to unmarshal a ServiceError type from the response body. -// if that fails then make a best attempt at creating something meaningful. -// NOTE: this assumes that the async operation has failed. 
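An aside on `getProvisioningState` above: it digs `properties.provisioningState` out of the decoded body with unchecked type assertions. A standalone sketch of the same lookup, using comma-ok assertions instead so malformed bodies return nil rather than panic:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// provisioningState mirrors pollingTrackerBase.getProvisioningState: return
// properties.provisioningState when present, or nil.
func provisioningState(rawBody map[string]interface{}) *string {
	props, ok := rawBody["properties"].(map[string]interface{})
	if !ok {
		return nil
	}
	if s, ok := props["provisioningState"].(string); ok {
		return &s
	}
	return nil
}

func main() {
	var body map[string]interface{}
	_ = json.Unmarshal([]byte(`{"properties":{"provisioningState":"Succeeded"}}`), &body)
	if ps := provisioningState(body); ps != nil {
		fmt.Println(*ps) // Succeeded
	}
}
```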
-func (pt *pollingTrackerBase) updateErrorFromResponse() { - var err error - if pt.resp.ContentLength != 0 { - type respErr struct { - ServiceError *ServiceError `json:"error"` - } - re := respErr{} - defer pt.resp.Body.Close() - var b []byte - if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { - goto Default - } - // put the body back so it's available to other callers - pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - if len(b) == 0 { - goto Default - } - if err = json.Unmarshal(b, &re); err != nil { - goto Default - } - // unmarshalling the error didn't yield anything, try unwrapped error - if re.ServiceError == nil { - err = json.Unmarshal(b, &re.ServiceError) - if err != nil { - goto Default - } - } - // the unmarshaller will ensure re.ServiceError is non-nil - // even if there was no content unmarshalled so check the code. - if re.ServiceError.Code != "" { - pt.Err = re.ServiceError - return - } - } -Default: - se := &ServiceError{ - Code: pt.pollingStatus(), - Message: "The async operation failed.", - } - if err != nil { - se.InnerError = make(map[string]interface{}) - se.InnerError["unmarshalError"] = err.Error() - } - // stick the response body into the error object in hopes - // it contains something useful to help diagnose the failure. - if len(pt.rawBody) > 0 { - se.AdditionalInfo = []map[string]interface{}{ - pt.rawBody, - } - } - pt.Err = se -} - -func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { - if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { - pt.State = pt.rawBody["status"].(string) - } else { - if pt.resp.StatusCode == http.StatusAccepted { - pt.State = operationInProgress - } else if provStateApl { - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationSucceeded - } - } else { - return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") - } - } - // if the operation has failed update the error state - if pt.hasFailed() { - pt.updateErrorFromResponse() - } - return nil -} - -func (pt pollingTrackerBase) pollingError() error { - if pt.Err == nil { - return nil - } - return pt.Err -} - -func (pt pollingTrackerBase) pollingMethod() PollingMethodType { - return pt.Pm -} - -func (pt pollingTrackerBase) pollingStatus() string { - return pt.State -} - -func (pt pollingTrackerBase) pollingURL() string { - return pt.URI -} - -func (pt pollingTrackerBase) finalGetURL() string { - return pt.FinalGetURI -} - -func (pt pollingTrackerBase) hasTerminated() bool { - return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) -} - -func (pt pollingTrackerBase) hasFailed() bool { - return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) -} - -func (pt pollingTrackerBase) hasSucceeded() bool { - return strings.EqualFold(pt.State, operationSucceeded) -} - -func (pt pollingTrackerBase) latestResponse() *http.Response { - return pt.resp -} - -// error checking common to all trackers -func (pt pollingTrackerBase) baseCheckForErrors() error { - // for Azure-AsyncOperations the response body cannot be nil or empty - if pt.Pm == PollingAsyncOperation { - if pt.resp.Body == nil || pt.resp.ContentLength == 0 { - return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") - } - if pt.rawBody["status"] == nil { - return 
autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") - } - } - return nil -} - -// default initialization of polling URL/method. each verb tracker will update this as required. -func (pt *pollingTrackerBase) initPollingMethod() error { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - return nil - } - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh != "" { - pt.URI = lh - pt.Pm = PollingLocation - return nil - } - // it's ok if we didn't find a polling header, this will be handled elsewhere - return nil -} - -// DELETE - -type pollingTrackerDelete struct { - pollingTrackerBase -} - -func (pt *pollingTrackerDelete) updatePollingMethod() error { - // for 201 the Location header is required - if pt.resp.StatusCode == http.StatusCreated { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") - } else { - pt.URI = lh - } - pt.Pm = PollingLocation - pt.FinalGetURI = pt.URI - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. - if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - // when both headers are returned we use the value in the Location header for the final GET - pt.FinalGetURI = lh - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerDelete) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerDelete) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent -} - -// PATCH - -type pollingTrackerPatch struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPatch) updatePollingMethod() error { - // by default we can use the original URL for polling and final GET - if pt.URI == "" { - pt.URI = pt.resp.Request.URL.String() - } - if pt.FinalGetURI == "" { - pt.FinalGetURI = pt.resp.Request.URL.String() - } - if pt.Pm == PollingUnknown { - pt.Pm = PollingRequestURI - } - // for 201 it's permissible for no headers to be returned - if pt.resp.StatusCode == http.StatusCreated { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - // note the absence of the "final GET" mechanism for PATCH - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - if ao == "" { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - 
return err - } else if lh == "" { - return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } else { - pt.URI = lh - pt.Pm = PollingLocation - } - } - } - return nil -} - -func (pt pollingTrackerPatch) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerPatch) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated -} - -// POST - -type pollingTrackerPost struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPost) updatePollingMethod() error { - // 201 requires Location header - if pt.resp.StatusCode == http.StatusCreated { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") - } else { - pt.URI = lh - pt.FinalGetURI = lh - pt.Pm = PollingLocation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. - if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - // when both headers are returned we use the value in the Location header for the final GET - pt.FinalGetURI = lh - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerPost) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerPost) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent -} - -// PUT - -type pollingTrackerPut struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPut) updatePollingMethod() error { - // by default we can use the original URL for polling and final GET - if pt.URI == "" { - pt.URI = pt.resp.Request.URL.String() - } - if pt.FinalGetURI == "" { - pt.FinalGetURI = pt.resp.Request.URL.String() - } - if pt.Pm == PollingUnknown { - pt.Pm = PollingRequestURI - } - // for 201 it's permissible for no headers to be returned - if pt.resp.StatusCode == http.StatusCreated { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. 
- if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerPut) checkForErrors() error { - err := pt.baseCheckForErrors() - if err != nil { - return err - } - // if there are no LRO headers then the body cannot be empty - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } - lh, err := getURLFromLocationHeader(pt.resp) - if err != nil { - return err - } - if ao == "" && lh == "" && len(pt.rawBody) == 0 { - return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body") - } - return nil -} - -func (pt pollingTrackerPut) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated -} - -// creates a polling tracker based on the verb of the original request -func createPollingTracker(resp *http.Response) (pollingTracker, error) { - var pt pollingTracker - switch strings.ToUpper(resp.Request.Method) { - case http.MethodDelete: - pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPatch: - pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPost: - pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPut: - pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} - default: - return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) - } - if err := pt.initializeState(); err != nil { - return pt, err - } - // this initializes the polling header values, we do this during creation in case the - // initial response send us invalid values; this way the API call will return a non-nil - // error (not doing this means the error shows up in Future.Done) - return pt, pt.updatePollingMethod() -} - -// gets the polling URL from the Azure-AsyncOperation header. -// ensures the URL is well-formed and absolute. -func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { - s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) - if s == "" { - return "", nil - } - if !isValidURL(s) { - return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) - } - return s, nil -} - -// gets the polling URL from the Location header. -// ensures the URL is well-formed and absolute. -func getURLFromLocationHeader(resp *http.Response) (string, error) { - s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) - if s == "" { - return "", nil - } - if !isValidURL(s) { - return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) - } - return s, nil -} - -// verify that the URL is valid and absolute -func isValidURL(s string) bool { - u, err := url.Parse(s) - return err == nil && u.IsAbs() -} - -// PollingMethodType defines a type used for enumerating polling mechanisms. -type PollingMethodType string - -const ( - // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. - PollingAsyncOperation PollingMethodType = "AsyncOperation" - - // PollingLocation indicates the polling method uses the Location header. 
- PollingLocation PollingMethodType = "Location" - - // PollingRequestURI indicates the polling method uses the original request URI. - PollingRequestURI PollingMethodType = "RequestURI" - - // PollingUnknown indicates an unknown polling method and is the default value. - PollingUnknown PollingMethodType = "" -) - -// AsyncOpIncompleteError is the type that's returned from a future that has not completed. -type AsyncOpIncompleteError struct { - // FutureType is the name of the type composed of a azure.Future. - FutureType string -} - -// Error returns an error message including the originating type name of the error. -func (e AsyncOpIncompleteError) Error() string { - return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) -} - -// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. -func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { - return AsyncOpIncompleteError{ - FutureType: futureType, - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md deleted file mode 100644 index 05bef8a800..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md +++ /dev/null @@ -1,152 +0,0 @@ -# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `auth` to `azidentity` please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/). - -## Authentication - -Typical SDK operations must be authenticated and authorized. The `autorest.Authorizer` -interface allows use of any auth style in requests, such as inserting an OAuth2 -Authorization header and bearer token received from Azure AD. - -The SDK itself provides a simple way to get an authorizer which first checks -for OAuth client credentials in environment variables and then falls back to -Azure's [Managed Service Identity](https://docs.microsoft.com/azure/active-directory/msi-overview) when available, e.g. when on an Azure -VM. The following snippet from [the previous section](#use) demonstrates -this helper.
- -```go -import "github.com/Azure/go-autorest/autorest/azure/auth" - -// create a VirtualNetworks client -vnetClient := network.NewVirtualNetworksClient("") - -// create an authorizer from env vars or Azure Managed Service Identity -authorizer, err := auth.NewAuthorizerFromEnvironment() -if err != nil { - handle(err) -} - -vnetClient.Authorizer = authorizer - -// call the VirtualNetworks CreateOrUpdate API -vnetClient.CreateOrUpdate(context.Background(), -// ... -``` - -The following environment variables help determine authentication configuration: - -- `AZURE_ENVIRONMENT`: Specifies the Azure Environment to use. If not set, it - defaults to `AzurePublicCloud`. Not applicable to authentication with Managed - Service Identity (MSI). -- `AZURE_AD_RESOURCE`: Specifies the AAD resource ID to use. If not set, it - defaults to `ResourceManagerEndpoint` for operations with Azure Resource - Manager. You can also choose an alternate resource programmatically with - `auth.NewAuthorizerFromEnvironmentWithResource(resource string)`. - -### More Authentication Details - -The previous is the first and most recommended of several authentication -options offered by the SDK because it allows seamless use of both service -principals and [Azure Managed Service Identity][]. Other options are listed -below. - -> Note: If you need to create a new service principal, run `az ad sp create-for-rbac -n ""` in the -> [azure-cli](https://github.com/Azure/azure-cli). See [these -> docs](https://docs.microsoft.com/cli/azure/create-an-azure-service-principal-azure-cli?view=azure-cli-latest) -> for more info. Copy the new principal's ID, secret, and tenant ID for use in -> your app, or consider the `--sdk-auth` parameter for serialized output. - -[azure managed service identity]: https://docs.microsoft.com/azure/active-directory/msi-overview - -- The `auth.NewAuthorizerFromEnvironment()` described above creates an authorizer - from the first available of the following configuration: - - 1. **Client Credentials**: Azure AD Application ID and Secret. - - - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. - - `AZURE_CLIENT_ID`: Specifies the app client ID to use. - - `AZURE_CLIENT_SECRET`: Specifies the app secret to use. - - 2. **Client Certificate**: Azure AD Application ID and X.509 Certificate. - - - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. - - `AZURE_CLIENT_ID`: Specifies the app client ID to use. - - `AZURE_CERTIFICATE_PATH`: Specifies the certificate Path to use. - - `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use. - - 3. **Resource Owner Password**: Azure AD User and Password. This grant type is *not - recommended*, use device login instead if you need interactive login. - - - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. - - `AZURE_CLIENT_ID`: Specifies the app client ID to use. - - `AZURE_USERNAME`: Specifies the username to use. - - `AZURE_PASSWORD`: Specifies the password to use. - - 4. **Azure Managed Service Identity**: Delegate credential management to the - platform. Requires that code is running in Azure, e.g. on a VM. All - configuration is handled by Azure. See [Azure Managed Service - Identity](https://docs.microsoft.com/azure/active-directory/msi-overview) - for more details. - -- The `auth.NewAuthorizerFromFile()` method creates an authorizer using - credentials from an auth file created by the [Azure CLI][]. Follow these - steps to utilize: - - 1.
Create a service principal and output an auth file using `az ad sp create-for-rbac --sdk-auth > client_credentials.json`. - 2. Set environment variable `AZURE_AUTH_LOCATION` to the path of the saved - output file. - 3. Use the authorizer returned by `auth.NewAuthorizerFromFile()` in your - client as described above. - -- The `auth.NewAuthorizerFromCLI()` method creates an authorizer which - uses [Azure CLI][] to obtain its credentials. - - The default audience being requested is `https://management.azure.com` (Azure ARM API). - To specify your own audience, export `AZURE_AD_RESOURCE` as an environment variable. - This is read by `auth.NewAuthorizerFromCLI()` and passed to Azure CLI to acquire the access token. - - For example, to request an access token for Azure Key Vault, export - ``` - AZURE_AD_RESOURCE="https://vault.azure.net" - ``` - -- `auth.NewAuthorizerFromCLIWithResource(AUDIENCE_URL_OR_APPLICATION_ID)` - this method is self contained and does - not require exporting environment variables. For example, to request an access token for Azure Key Vault: - ``` - auth.NewAuthorizerFromCLIWithResource("https://vault.azure.net") - ``` - - To use `NewAuthorizerFromCLI()` or `NewAuthorizerFromCLIWithResource()`, follow these steps: - - 1. Install [Azure CLI v2.0.12](https://docs.microsoft.com/cli/azure/install-azure-cli) or later. Upgrade earlier versions. - 2. Use `az login` to sign in to Azure. - - If you receive an error, use `az account get-access-token` to verify access. - - If Azure CLI is not installed to the default directory, you may receive an error - reporting that `az` cannot be found. - Use the `AzureCLIPath` environment variable to define the Azure CLI installation folder. - - If you are signed in to Azure CLI using multiple accounts or your account has - access to multiple subscriptions, you need to specify the specific subscription - to be used. To do so, use: - - ``` - az account set --subscription - ``` - - To verify the current account settings, use: - - ``` - az account list - ``` - -[azure cli]: https://github.com/Azure/azure-cli - -- Finally, you can use OAuth's [Device Flow][] by calling - `auth.NewDeviceFlowConfig()` and extracting the Authorizer as follows: - - ```go - config := auth.NewDeviceFlowConfig(clientID, tenantID) - a, err := config.Authorizer() - ``` - -[device flow]: https://oauth.net/2/device-flow/ diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go deleted file mode 100644 index e97589dcdc..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go +++ /dev/null @@ -1,772 +0,0 @@ -package auth - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
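Before the package internals, a usage sketch of the environment-variable chain the README above describes; the service-principal values are placeholders, and with none of them set on an Azure VM the chain falls through to MSI:

```go
package main

import (
	"fmt"
	"os"

	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Placeholder credentials: with these three set, the chain resolves to
	// client-secret auth (option 1 in the README's list).
	os.Setenv("AZURE_TENANT_ID", "00000000-0000-0000-0000-000000000000")
	os.Setenv("AZURE_CLIENT_ID", "00000000-0000-0000-0000-000000000000")
	os.Setenv("AZURE_CLIENT_SECRET", "placeholder-secret")

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		fmt.Println("no usable credentials:", err)
		return
	}
	_ = authorizer // attach to a generated client's Authorizer field
}
```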
- -import ( - "bytes" - "context" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "os" - "strings" - "unicode/utf16" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/azure/cli" - "github.com/Azure/go-autorest/logger" - "github.com/dimchansky/utfbom" -) - -// The possible keys in the Values map. -const ( - SubscriptionID = "AZURE_SUBSCRIPTION_ID" - TenantID = "AZURE_TENANT_ID" - AuxiliaryTenantIDs = "AZURE_AUXILIARY_TENANT_IDS" - ClientID = "AZURE_CLIENT_ID" - ClientSecret = "AZURE_CLIENT_SECRET" - CertificatePath = "AZURE_CERTIFICATE_PATH" - CertificatePassword = "AZURE_CERTIFICATE_PASSWORD" - Username = "AZURE_USERNAME" - Password = "AZURE_PASSWORD" - EnvironmentName = "AZURE_ENVIRONMENT" - Resource = "AZURE_AD_RESOURCE" - ActiveDirectoryEndpoint = "ActiveDirectoryEndpoint" - ResourceManagerEndpoint = "ResourceManagerEndpoint" - GraphResourceID = "GraphResourceID" - SQLManagementEndpoint = "SQLManagementEndpoint" - GalleryEndpoint = "GalleryEndpoint" - ManagementEndpoint = "ManagementEndpoint" -) - -// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order: -// 1. Client credentials -// 2. Client certificate -// 3. Username password -// 4. MSI -func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) { - logger.Instance.Writeln(logger.LogInfo, "NewAuthorizerFromEnvironment() determining authentication mechanism") - settings, err := GetSettingsFromEnvironment() - if err != nil { - return nil, err - } - return settings.GetAuthorizer() -} - -// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order: -// 1. Client credentials -// 2. Client certificate -// 3. Username password -// 4. MSI -func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) { - logger.Instance.Writeln(logger.LogInfo, "NewAuthorizerFromEnvironmentWithResource() determining authentication mechanism") - settings, err := GetSettingsFromEnvironment() - if err != nil { - return nil, err - } - settings.Values[Resource] = resource - return settings.GetAuthorizer() -} - -// EnvironmentSettings contains the available authentication settings. -type EnvironmentSettings struct { - Values map[string]string - Environment azure.Environment -} - -// GetSettingsFromEnvironment returns the available authentication settings from the environment. -func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) { - s = EnvironmentSettings{ - Values: map[string]string{}, - } - s.setValue(SubscriptionID) - s.setValue(TenantID) - s.setValue(AuxiliaryTenantIDs) - s.setValue(ClientID) - s.setValue(ClientSecret) - s.setValue(CertificatePath) - s.setValue(CertificatePassword) - s.setValue(Username) - s.setValue(Password) - s.setValue(EnvironmentName) - s.setValue(Resource) - if v := s.Values[EnvironmentName]; v == "" { - s.Environment = azure.PublicCloud - } else { - s.Environment, err = azure.EnvironmentFromName(v) - } - if s.Values[Resource] == "" { - s.Values[Resource] = s.Environment.ResourceManagerEndpoint - } - return -} - -// GetSubscriptionID returns the available subscription ID or an empty string. 
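An aside illustrating `GetSettingsFromEnvironment` above: the returned snapshot can be inspected directly, and per the code it only errors when `AZURE_ENVIRONMENT` names an unknown cloud. A short usage sketch assuming the canonical import path:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	settings, err := auth.GetSettingsFromEnvironment()
	if err != nil {
		panic(err) // only fails when AZURE_ENVIRONMENT names an unknown cloud
	}
	fmt.Println("subscription:", settings.GetSubscriptionID())
	fmt.Println("cloud:", settings.Environment.Name)
	fmt.Println("resource:", settings.Values[auth.Resource]) // defaults to the ARM endpoint
}
```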
-func (settings EnvironmentSettings) GetSubscriptionID() string { - return settings.Values[SubscriptionID] -} - -// adds the specified environment variable value to the Values map if it exists -func (settings EnvironmentSettings) setValue(key string) { - if v := os.Getenv(key); v != "" { - logger.Instance.Writef(logger.LogInfo, "GetSettingsFromEnvironment() found environment var %s\n", key) - settings.Values[key] = v - } -} - -// helper to return client and tenant IDs -func (settings EnvironmentSettings) getClientAndTenant() (string, string) { - clientID := settings.Values[ClientID] - tenantID := settings.Values[TenantID] - return clientID, tenantID -} - -// GetClientCredentials creates a config object from the available client credentials. -// An error is returned if no client credentials are available. -func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsConfig, error) { - secret := settings.Values[ClientSecret] - if secret == "" { - logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetClientCredentials() missing client secret") - return ClientCredentialsConfig{}, errors.New("missing client secret") - } - clientID, tenantID := settings.getClientAndTenant() - config := NewClientCredentialsConfig(clientID, secret, tenantID) - config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint - config.Resource = settings.Values[Resource] - if auxTenants, ok := settings.Values[AuxiliaryTenantIDs]; ok { - config.AuxTenants = strings.Split(auxTenants, ";") - for i := range config.AuxTenants { - config.AuxTenants[i] = strings.TrimSpace(config.AuxTenants[i]) - } - } - return config, nil -} - -// GetClientCertificate creates a config object from the available certificate credentials. -// An error is returned if no certificate credentials are available. -func (settings EnvironmentSettings) GetClientCertificate() (ClientCertificateConfig, error) { - certPath := settings.Values[CertificatePath] - if certPath == "" { - logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetClientCertificate() missing certificate path") - return ClientCertificateConfig{}, errors.New("missing certificate path") - } - certPwd := settings.Values[CertificatePassword] - clientID, tenantID := settings.getClientAndTenant() - config := NewClientCertificateConfig(certPath, certPwd, clientID, tenantID) - config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint - config.Resource = settings.Values[Resource] - return config, nil -} - -// GetUsernamePassword creates a config object from the available username/password credentials. -// An error is returned if no username/password credentials are available. -func (settings EnvironmentSettings) GetUsernamePassword() (UsernamePasswordConfig, error) { - username := settings.Values[Username] - password := settings.Values[Password] - if username == "" || password == "" { - logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetUsernamePassword() missing username and/or password") - return UsernamePasswordConfig{}, errors.New("missing username/password") - } - clientID, tenantID := settings.getClientAndTenant() - config := NewUsernamePasswordConfig(username, password, clientID, tenantID) - config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint - config.Resource = settings.Values[Resource] - return config, nil -} - -// GetMSI creates a MSI config object from the available client ID. 
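Credential configs can also be assembled directly rather than via the environment, mirroring what `GetClientCredentials` above builds; a sketch with placeholder service-principal values, assuming the canonical import paths:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// placeholder service-principal values
	cfg := auth.NewClientCredentialsConfig("client-id", "client-secret", "tenant-id")
	cfg.AADEndpoint = azure.PublicCloud.ActiveDirectoryEndpoint
	cfg.Resource = azure.PublicCloud.ResourceManagerEndpoint

	authorizer, err := cfg.Authorizer()
	if err != nil {
		fmt.Println("token acquisition failed:", err)
		return
	}
	_ = authorizer
}
```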
-func (settings EnvironmentSettings) GetMSI() MSIConfig {
- config := NewMSIConfig()
- config.Resource = settings.Values[Resource]
- config.ClientID = settings.Values[ClientID]
- return config
-}
-
-// GetDeviceFlow creates a device-flow config object from the available client and tenant IDs.
-func (settings EnvironmentSettings) GetDeviceFlow() DeviceFlowConfig {
- clientID, tenantID := settings.getClientAndTenant()
- config := NewDeviceFlowConfig(clientID, tenantID)
- config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
- config.Resource = settings.Values[Resource]
- return config
-}
-
-// GetAuthorizer creates an Authorizer configured from environment variables in the order:
-// 1. Client credentials
-// 2. Client certificate
-// 3. Username password
-// 4. MSI
-func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {
- // 1. Client Credentials
- if c, e := settings.GetClientCredentials(); e == nil {
- logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetAuthorizer() using client secret credentials")
- return c.Authorizer()
- }
-
- // 2. Client Certificate
- if c, e := settings.GetClientCertificate(); e == nil {
- logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetAuthorizer() using client certificate credentials")
- return c.Authorizer()
- }
-
- // 3. Username Password
- if c, e := settings.GetUsernamePassword(); e == nil {
- logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetAuthorizer() using user name/password credentials")
- return c.Authorizer()
- }
-
- // 4. MSI
- if !adal.MSIAvailable(context.Background(), nil) {
- return nil, errors.New("MSI not available")
- }
- logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetAuthorizer() using MSI authentication")
- return settings.GetMSI().Authorizer()
-}
-
-// NewAuthorizerFromFile creates an Authorizer configured from a configuration file in the following order.
-// 1. Client credentials
-// 2. Client certificate
-// The path to the configuration file must be specified in the AZURE_AUTH_LOCATION environment variable.
-// resourceBaseURI - used to determine the resource type
-func NewAuthorizerFromFile(resourceBaseURI string) (autorest.Authorizer, error) {
- settings, err := GetSettingsFromFile()
- if err != nil {
- return nil, err
- }
- return settings.GetAuthorizer(resourceBaseURI)
-}
-
-// GetAuthorizer creates an Authorizer in the following order.
-// 1. Client credentials
-// 2. Client certificate
-// resourceBaseURI - used to determine the resource type
-func (settings FileSettings) GetAuthorizer(resourceBaseURI string) (autorest.Authorizer, error) {
- if resourceBaseURI == "" {
- resourceBaseURI = azure.PublicCloud.ServiceManagementEndpoint
- }
- if a, err := settings.ClientCredentialsAuthorizer(resourceBaseURI); err == nil {
- return a, err
- }
- if a, err := settings.ClientCertificateAuthorizer(resourceBaseURI); err == nil {
- return a, err
- }
- return nil, errors.New("auth file missing client and certificate credentials")
-}
-
-// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file in the following order.
-// 1. Client credentials
-// 2. Client certificate
-// The path to the configuration file must be specified in the AZURE_AUTH_LOCATION environment variable.
-func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) { - s, err := GetSettingsFromFile() - if err != nil { - return nil, err - } - if a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil { - return a, err - } - if a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil { - return a, err - } - return nil, errors.New("auth file missing client and certificate credentials") -} - -// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. -func NewAuthorizerFromCLI() (autorest.Authorizer, error) { - settings, err := GetSettingsFromEnvironment() - if err != nil { - return nil, err - } - - if settings.Values[Resource] == "" { - settings.Values[Resource] = settings.Environment.ResourceManagerEndpoint - } - - return NewAuthorizerFromCLIWithResource(settings.Values[Resource]) -} - -// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. -func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) { - token, err := cli.GetTokenFromCLI(resource) - if err != nil { - return nil, err - } - - adalToken, err := token.ToADALToken() - if err != nil { - return nil, err - } - - return autorest.NewBearerAuthorizer(&adalToken), nil -} - -// GetSettingsFromFile returns the available authentication settings from an Azure CLI authentication file. -func GetSettingsFromFile() (FileSettings, error) { - s := FileSettings{} - fileLocation := os.Getenv("AZURE_AUTH_LOCATION") - if fileLocation == "" { - return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set") - } - - contents, err := ioutil.ReadFile(fileLocation) - if err != nil { - return s, err - } - - // Auth file might be encoded - decoded, err := decode(contents) - if err != nil { - return s, err - } - - authFile := map[string]interface{}{} - err = json.Unmarshal(decoded, &authFile) - if err != nil { - return s, err - } - - s.Values = map[string]string{} - s.setKeyValue(ClientID, authFile["clientId"]) - s.setKeyValue(ClientSecret, authFile["clientSecret"]) - s.setKeyValue(CertificatePath, authFile["clientCertificate"]) - s.setKeyValue(CertificatePassword, authFile["clientCertificatePassword"]) - s.setKeyValue(SubscriptionID, authFile["subscriptionId"]) - s.setKeyValue(TenantID, authFile["tenantId"]) - s.setKeyValue(ActiveDirectoryEndpoint, authFile["activeDirectoryEndpointUrl"]) - s.setKeyValue(ResourceManagerEndpoint, authFile["resourceManagerEndpointUrl"]) - s.setKeyValue(GraphResourceID, authFile["activeDirectoryGraphResourceId"]) - s.setKeyValue(SQLManagementEndpoint, authFile["sqlManagementEndpointUrl"]) - s.setKeyValue(GalleryEndpoint, authFile["galleryEndpointUrl"]) - s.setKeyValue(ManagementEndpoint, authFile["managementEndpointUrl"]) - return s, nil -} - -// FileSettings contains the available authentication settings. -type FileSettings struct { - Values map[string]string -} - -// GetSubscriptionID returns the available subscription ID or an empty string. 
-func (settings FileSettings) GetSubscriptionID() string { - return settings.Values[SubscriptionID] -} - -// adds the specified value to the Values map if it isn't nil -func (settings FileSettings) setKeyValue(key string, val interface{}) { - if val != nil { - settings.Values[key] = val.(string) - } -} - -// returns the specified AAD endpoint or the public cloud endpoint if unspecified -func (settings FileSettings) getAADEndpoint() string { - if v, ok := settings.Values[ActiveDirectoryEndpoint]; ok { - return v - } - return azure.PublicCloud.ActiveDirectoryEndpoint -} - -// ServicePrincipalTokenFromClientCredentials creates a ServicePrincipalToken from the available client credentials. -func (settings FileSettings) ServicePrincipalTokenFromClientCredentials(baseURI string) (*adal.ServicePrincipalToken, error) { - resource, err := settings.getResourceForToken(baseURI) - if err != nil { - return nil, err - } - return settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) -} - -// ClientCredentialsAuthorizer creates an authorizer from the available client credentials. -func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) { - resource, err := settings.getResourceForToken(baseURI) - if err != nil { - return nil, err - } - return settings.ClientCredentialsAuthorizerWithResource(resource) -} - -// ServicePrincipalTokenFromClientCredentialsWithResource creates a ServicePrincipalToken -// from the available client credentials and the specified resource. -func (settings FileSettings) ServicePrincipalTokenFromClientCredentialsWithResource(resource string) (*adal.ServicePrincipalToken, error) { - if _, ok := settings.Values[ClientSecret]; !ok { - return nil, errors.New("missing client secret") - } - config, err := adal.NewOAuthConfig(settings.getAADEndpoint(), settings.Values[TenantID]) - if err != nil { - return nil, err - } - return adal.NewServicePrincipalToken(*config, settings.Values[ClientID], settings.Values[ClientSecret], resource) -} - -func (settings FileSettings) clientCertificateConfigWithResource(resource string) (ClientCertificateConfig, error) { - if _, ok := settings.Values[CertificatePath]; !ok { - return ClientCertificateConfig{}, errors.New("missing certificate path") - } - cfg := NewClientCertificateConfig(settings.Values[CertificatePath], settings.Values[CertificatePassword], settings.Values[ClientID], settings.Values[TenantID]) - cfg.AADEndpoint = settings.getAADEndpoint() - cfg.Resource = resource - return cfg, nil -} - -// ClientCredentialsAuthorizerWithResource creates an authorizer from the available client credentials and the specified resource. -func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) { - spToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) - if err != nil { - return nil, err - } - return autorest.NewBearerAuthorizer(spToken), nil -} - -// ServicePrincipalTokenFromClientCertificate creates a ServicePrincipalToken from the available certificate credentials. -func (settings FileSettings) ServicePrincipalTokenFromClientCertificate(baseURI string) (*adal.ServicePrincipalToken, error) { - resource, err := settings.getResourceForToken(baseURI) - if err != nil { - return nil, err - } - return settings.ServicePrincipalTokenFromClientCertificateWithResource(resource) -} - -// ClientCertificateAuthorizer creates an authorizer from the available certificate credentials. 
-func (settings FileSettings) ClientCertificateAuthorizer(baseURI string) (autorest.Authorizer, error) { - resource, err := settings.getResourceForToken(baseURI) - if err != nil { - return nil, err - } - return settings.ClientCertificateAuthorizerWithResource(resource) -} - -// ServicePrincipalTokenFromClientCertificateWithResource creates a ServicePrincipalToken from the available certificate credentials. -func (settings FileSettings) ServicePrincipalTokenFromClientCertificateWithResource(resource string) (*adal.ServicePrincipalToken, error) { - cfg, err := settings.clientCertificateConfigWithResource(resource) - if err != nil { - return nil, err - } - return cfg.ServicePrincipalToken() -} - -// ClientCertificateAuthorizerWithResource creates an authorizer from the available certificate credentials and the specified resource. -func (settings FileSettings) ClientCertificateAuthorizerWithResource(resource string) (autorest.Authorizer, error) { - cfg, err := settings.clientCertificateConfigWithResource(resource) - if err != nil { - return nil, err - } - return cfg.Authorizer() -} - -func decode(b []byte) ([]byte, error) { - reader, enc := utfbom.Skip(bytes.NewReader(b)) - - switch enc { - case utfbom.UTF16LittleEndian: - u16 := make([]uint16, (len(b)/2)-1) - err := binary.Read(reader, binary.LittleEndian, &u16) - if err != nil { - return nil, err - } - return []byte(string(utf16.Decode(u16))), nil - case utfbom.UTF16BigEndian: - u16 := make([]uint16, (len(b)/2)-1) - err := binary.Read(reader, binary.BigEndian, &u16) - if err != nil { - return nil, err - } - return []byte(string(utf16.Decode(u16))), nil - } - return ioutil.ReadAll(reader) -} - -func (settings FileSettings) getResourceForToken(baseURI string) (string, error) { - // Compare default base URI from the SDK to the endpoints from the public cloud - // Base URI and token resource are the same string. This func finds the authentication - // file field that matches the SDK base URI. The SDK defines the public cloud - // endpoint as its default base URI - if !strings.HasSuffix(baseURI, "/") { - baseURI += "/" - } - switch baseURI { - case azure.PublicCloud.ServiceManagementEndpoint: - return settings.Values[ManagementEndpoint], nil - case azure.PublicCloud.ResourceManagerEndpoint: - return settings.Values[ResourceManagerEndpoint], nil - case azure.PublicCloud.ActiveDirectoryEndpoint: - return settings.Values[ActiveDirectoryEndpoint], nil - case azure.PublicCloud.GalleryEndpoint: - return settings.Values[GalleryEndpoint], nil - case azure.PublicCloud.GraphEndpoint: - return settings.Values[GraphResourceID], nil - } - return "", fmt.Errorf("auth: base URI not found in endpoints") -} - -// NewClientCredentialsConfig creates an AuthorizerConfig object configured to obtain an Authorizer through Client Credentials. -// Defaults to Public Cloud and Resource Manager Endpoint. -func NewClientCredentialsConfig(clientID string, clientSecret string, tenantID string) ClientCredentialsConfig { - return ClientCredentialsConfig{ - ClientID: clientID, - ClientSecret: clientSecret, - TenantID: tenantID, - Resource: azure.PublicCloud.ResourceManagerEndpoint, - AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, - } -} - -// NewClientCertificateConfig creates a ClientCertificateConfig object configured to obtain an Authorizer through client certificate. -// Defaults to Public Cloud and Resource Manager Endpoint. 
-func NewClientCertificateConfig(certificatePath string, certificatePassword string, clientID string, tenantID string) ClientCertificateConfig {
- return ClientCertificateConfig{
- CertificatePath: certificatePath,
- CertificatePassword: certificatePassword,
- ClientID: clientID,
- TenantID: tenantID,
- Resource: azure.PublicCloud.ResourceManagerEndpoint,
- AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
- }
-}
-
-// NewUsernamePasswordConfig creates a UsernamePasswordConfig object configured to obtain an Authorizer through username and password.
-// Defaults to Public Cloud and Resource Manager Endpoint.
-func NewUsernamePasswordConfig(username string, password string, clientID string, tenantID string) UsernamePasswordConfig {
- return UsernamePasswordConfig{
- Username: username,
- Password: password,
- ClientID: clientID,
- TenantID: tenantID,
- Resource: azure.PublicCloud.ResourceManagerEndpoint,
- AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
- }
-}
-
-// NewMSIConfig creates an MSIConfig object configured to obtain an Authorizer through MSI.
-func NewMSIConfig() MSIConfig {
- return MSIConfig{
- Resource: azure.PublicCloud.ResourceManagerEndpoint,
- }
-}
-
-// NewDeviceFlowConfig creates a DeviceFlowConfig object configured to obtain an Authorizer through device flow.
-// Defaults to Public Cloud and Resource Manager Endpoint.
-func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig {
- return DeviceFlowConfig{
- ClientID: clientID,
- TenantID: tenantID,
- Resource: azure.PublicCloud.ResourceManagerEndpoint,
- AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
- }
-}
-
-// AuthorizerConfig provides an authorizer from the configuration provided.
-type AuthorizerConfig interface {
- Authorizer() (autorest.Authorizer, error)
-}
-
-// ClientCredentialsConfig provides the options to get a bearer authorizer from client credentials.
-type ClientCredentialsConfig struct {
- ClientID string
- ClientSecret string
- TenantID string
- AuxTenants []string
- AADEndpoint string
- Resource string
-}
-
-// ServicePrincipalToken creates a ServicePrincipalToken from client credentials.
-func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
- oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)
- if err != nil {
- return nil, err
- }
- return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
-}
-
-// MultiTenantServicePrincipalToken creates a MultiTenantServicePrincipalToken from client credentials.
-func (ccc ClientCredentialsConfig) MultiTenantServicePrincipalToken() (*adal.MultiTenantServicePrincipalToken, error) {
- oauthConfig, err := adal.NewMultiTenantOAuthConfig(ccc.AADEndpoint, ccc.TenantID, ccc.AuxTenants, adal.OAuthOptions{})
- if err != nil {
- return nil, err
- }
- return adal.NewMultiTenantServicePrincipalToken(oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
-}
-
-// Authorizer gets the authorizer from client credentials.
-func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) { - if len(ccc.AuxTenants) == 0 { - spToken, err := ccc.ServicePrincipalToken() - if err != nil { - return nil, fmt.Errorf("failed to get SPT from client credentials: %v", err) - } - return autorest.NewBearerAuthorizer(spToken), nil - } - mtSPT, err := ccc.MultiTenantServicePrincipalToken() - if err != nil { - return nil, fmt.Errorf("failed to get multitenant SPT from client credentials: %v", err) - } - return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil -} - -// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate. -type ClientCertificateConfig struct { - ClientID string - CertificatePath string - CertificatePassword string - TenantID string - AuxTenants []string - AADEndpoint string - Resource string -} - -// ServicePrincipalToken creates a ServicePrincipalToken from client certificate. -func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) - if err != nil { - return nil, err - } - certData, err := ioutil.ReadFile(ccc.CertificatePath) - if err != nil { - return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) - } - certificate, rsaPrivateKey, err := adal.DecodePfxCertificateData(certData, ccc.CertificatePassword) - if err != nil { - return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) - } - return adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource) -} - -// MultiTenantServicePrincipalToken creates a MultiTenantServicePrincipalToken from client certificate. -func (ccc ClientCertificateConfig) MultiTenantServicePrincipalToken() (*adal.MultiTenantServicePrincipalToken, error) { - oauthConfig, err := adal.NewMultiTenantOAuthConfig(ccc.AADEndpoint, ccc.TenantID, ccc.AuxTenants, adal.OAuthOptions{}) - if err != nil { - return nil, err - } - certData, err := ioutil.ReadFile(ccc.CertificatePath) - if err != nil { - return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) - } - certificate, rsaPrivateKey, err := adal.DecodePfxCertificateData(certData, ccc.CertificatePassword) - if err != nil { - return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) - } - return adal.NewMultiTenantServicePrincipalTokenFromCertificate(oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource) -} - -// Authorizer gets an authorizer object from client certificate. -func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) { - if len(ccc.AuxTenants) == 0 { - spToken, err := ccc.ServicePrincipalToken() - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err) - } - return autorest.NewBearerAuthorizer(spToken), nil - } - mtSPT, err := ccc.MultiTenantServicePrincipalToken() - if err != nil { - return nil, fmt.Errorf("failed to get multitenant SPT from certificate auth: %v", err) - } - return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil -} - -// DeviceFlowConfig provides the options to get a bearer authorizer using device flow authentication. -type DeviceFlowConfig struct { - ClientID string - TenantID string - AADEndpoint string - Resource string -} - -// Authorizer gets the authorizer from device flow. 
-func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) { - spToken, err := dfc.ServicePrincipalToken() - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err) - } - return autorest.NewBearerAuthorizer(spToken), nil -} - -// ServicePrincipalToken gets the service principal token from device flow. -func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID) - if err != nil { - return nil, err - } - oauthClient := &autorest.Client{} - deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource) - if err != nil { - return nil, fmt.Errorf("failed to start device auth flow: %s", err) - } - log.Println(*deviceCode.Message) - token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) - if err != nil { - return nil, fmt.Errorf("failed to finish device auth flow: %s", err) - } - return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token) -} - -// UsernamePasswordConfig provides the options to get a bearer authorizer from a username and a password. -type UsernamePasswordConfig struct { - ClientID string - Username string - Password string - TenantID string - AADEndpoint string - Resource string -} - -// ServicePrincipalToken creates a ServicePrincipalToken from username and password. -func (ups UsernamePasswordConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID) - if err != nil { - return nil, err - } - return adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource) -} - -// Authorizer gets the authorizer from a username and a password. -func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) { - spToken, err := ups.ServicePrincipalToken() - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err) - } - return autorest.NewBearerAuthorizer(spToken), nil -} - -// MSIConfig provides the options to get a bearer authorizer through MSI. -type MSIConfig struct { - Resource string - ClientID string -} - -// ServicePrincipalToken creates a ServicePrincipalToken from MSI. -func (mc MSIConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { - spToken, err := adal.NewServicePrincipalTokenFromManagedIdentity(mc.Resource, &adal.ManagedIdentityOptions{ - ClientID: mc.ClientID, - }) - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err) - } - return spToken, nil -} - -// Authorizer gets the authorizer from MSI. -func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { - spToken, err := mc.ServicePrincipalToken() - if err != nil { - return nil, err - } - - return autorest.NewBearerAuthorizer(spToken), nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go deleted file mode 100644 index f7eb26fd36..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build modhack -// +build modhack - -package auth - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file, and the github.com/Azure/go-autorest import, won't actually become part of
-// the resultant binary.
-
-// Necessary for safely adding multi-module repo.
-// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
-import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
deleted file mode 100644
index 868345db68..0000000000
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
+++ /dev/null
@@ -1,388 +0,0 @@
-// Package azure provides Azure-specific implementations used with AutoRest.
-// See the included examples for more detail.
-package azure
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/Azure/go-autorest/autorest"
-)
-
-const (
- // HeaderClientID is the Azure extension header to set a user-specified request ID.
- HeaderClientID = "x-ms-client-request-id"
-
- // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
- // should be included in the response.
- HeaderReturnClientID = "x-ms-return-client-request-id"
-
- // HeaderContentType is the type of the content in the HTTP response.
- HeaderContentType = "Content-Type"
-
- // HeaderRequestID is the Azure extension header of the service generated request ID returned
- // in the response.
- HeaderRequestID = "x-ms-request-id"
-)
-
-// ServiceError encapsulates the error response from an Azure service.
-// It adheres to the OData v4 specification for error responses.
-type ServiceError struct {
- Code string `json:"code"`
- Message string `json:"message"`
- Target *string `json:"target"`
- Details []map[string]interface{} `json:"details"`
- InnerError map[string]interface{} `json:"innererror"`
- AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
-}
-
-func (se ServiceError) Error() string {
- result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
-
- if se.Target != nil {
- result += fmt.Sprintf(" Target=%q", *se.Target)
- }
-
- if se.Details != nil {
- d, err := json.Marshal(se.Details)
- if err != nil {
- result += fmt.Sprintf(" Details=%v", se.Details)
- } else {
- result += fmt.Sprintf(" Details=%s", d)
- }
- }
-
- if se.InnerError != nil {
- d, err := json.Marshal(se.InnerError)
- if err != nil {
- result += fmt.Sprintf(" InnerError=%v", se.InnerError)
- } else {
- result += fmt.Sprintf(" InnerError=%s", d)
- }
- }
-
- if se.AdditionalInfo != nil {
- d, err := json.Marshal(se.AdditionalInfo)
- if err != nil {
- result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo)
- } else {
- result += fmt.Sprintf(" AdditionalInfo=%s", d)
- }
- }
-
- return result
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
-func (se *ServiceError) UnmarshalJSON(b []byte) error {
- // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
-
- type serviceErrorInternal struct {
- Code string `json:"code"`
- Message string `json:"message"`
- Target *string `json:"target,omitempty"`
- AdditionalInfo []map[string]interface{} `json:"additionalInfo,omitempty"`
- // not all services conform to the OData v4 spec.
- // the following fields are where we've seen discrepancies
-
- // spec calls for []map[string]interface{} but have seen map[string]interface{}
- Details interface{} `json:"details,omitempty"`
-
- // spec calls for map[string]interface{} but have seen []map[string]interface{} and string
- InnerError interface{} `json:"innererror,omitempty"`
- }
-
- sei := serviceErrorInternal{}
- if err := json.Unmarshal(b, &sei); err != nil {
- return err
- }
-
- // copy the fields we know to be correct
- se.AdditionalInfo = sei.AdditionalInfo
- se.Code = sei.Code
- se.Message = sei.Message
- se.Target = sei.Target
-
- // converts an []interface{} to []map[string]interface{}
- arrayOfObjs := func(v interface{}) ([]map[string]interface{}, bool) {
- arrayOf, ok := v.([]interface{})
- if !ok {
- return nil, false
- }
- final := []map[string]interface{}{}
- for _, item := range arrayOf {
- as, ok := item.(map[string]interface{})
- if !ok {
- return nil, false
- }
- final = append(final, as)
- }
- return final, true
- }
-
- // convert the remaining fields, falling back to raw JSON if necessary
-
- if c, ok := arrayOfObjs(sei.Details); ok {
- se.Details = c
- } else if c, ok := sei.Details.(map[string]interface{}); ok {
- se.Details = []map[string]interface{}{c}
- } else if sei.Details != nil {
- // stuff into Details
- se.Details = []map[string]interface{}{
- {"raw": sei.Details},
- }
- }
-
- if c, ok := sei.InnerError.(map[string]interface{}); ok {
- se.InnerError = c
- } else if c, ok := arrayOfObjs(sei.InnerError); ok {
- // if there's only one error extract it
- if len(c) == 1 {
- se.InnerError = c[0]
- } else {
- // multiple errors, stuff them into the value
- se.InnerError = map[string]interface{}{
- "multi": c,
- }
- }
- } else if c, ok := sei.InnerError.(string); ok {
- se.InnerError = map[string]interface{}{"error": c}
- } else if sei.InnerError != nil {
- // stuff into InnerError
-
se.InnerError = map[string]interface{}{
- "raw": sei.InnerError,
- }
- }
- return nil
-}
-
-// RequestError describes an error response returned by Azure service.
-type RequestError struct {
- autorest.DetailedError
-
- // The error returned by the Azure service.
- ServiceError *ServiceError `json:"error" xml:"Error"`
-
- // The request id (from the x-ms-request-id-header) of the request.
- RequestID string
-}
-
-// Error returns a human-friendly error message from service error.
-func (e RequestError) Error() string {
- return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
- e.StatusCode, e.ServiceError)
-}
-
-// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
-func IsAzureError(e error) bool {
- _, ok := e.(*RequestError)
- return ok
-}
-
-// Resource contains details about an Azure resource.
-type Resource struct {
- SubscriptionID string
- ResourceGroup string
- Provider string
- ResourceType string
- ResourceName string
-}
-
-// String returns the string form of the Azure resource ID.
-func (r Resource) String() string {
- return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName)
-}
-
-// ParseResourceID parses a resource ID into a Resource struct.
-// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/template-functions-resource?tabs=json#resourceid.
-func ParseResourceID(resourceID string) (Resource, error) {
-
- const resourceIDPatternText = `(?i)^/subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)$`
- resourceIDPattern := regexp.MustCompile(resourceIDPatternText)
- match := resourceIDPattern.FindStringSubmatch(resourceID)
-
- if len(match) == 0 {
- return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID)
- }
-
- v := strings.Split(match[5], "/")
- resourceName := v[len(v)-1]
-
- result := Resource{
- SubscriptionID: match[1],
- ResourceGroup: match[2],
- Provider: match[3],
- ResourceType: match[4],
- ResourceName: resourceName,
- }
-
- return result, nil
-}
-
-// NewErrorWithError creates a new Error conforming object from the
-// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
-// if resp is nil), message, and original error. message is treated as a format
-// string to which the optional args apply.
-func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
- if v, ok := original.(*RequestError); ok {
- return *v
- }
-
- statusCode := autorest.UndefinedStatusCode
- if resp != nil {
- statusCode = resp.StatusCode
- }
- return RequestError{
- DetailedError: autorest.DetailedError{
- Original: original,
- PackageType: packageType,
- Method: method,
- StatusCode: statusCode,
- Message: fmt.Sprintf(message, args...),
- },
- }
-}
-
-// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
-// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
-// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
-// header to true such that the UUID accompanies the http.Response.
-func WithReturningClientID(uuid string) autorest.PrepareDecorator {
- preparer := autorest.CreatePreparer(
- WithClientID(uuid),
- WithReturnClientID(true))
-
- return func(p autorest.Preparer) autorest.Preparer {
- return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
- r, err := p.Prepare(r)
- if err != nil {
- return r, err
- }
- return preparer.Prepare(r)
- })
- }
-}
-
-// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
-// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
-// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
-func WithClientID(uuid string) autorest.PrepareDecorator {
- return autorest.WithHeader(HeaderClientID, uuid)
-}
-
-// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
-// x-ms-return-client-request-id whose boolean value indicates if the value of the
-// x-ms-client-request-id header should be included in the http.Response.
-func WithReturnClientID(b bool) autorest.PrepareDecorator {
- return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
-}
-
-// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
-// http.Request sent to the service (and returned in the http.Response)
-func ExtractClientID(resp *http.Response) string {
- return autorest.ExtractHeaderValue(HeaderClientID, resp)
-}
-
-// ExtractRequestID extracts the Azure server generated request identifier from the
-// x-ms-request-id header.
-func ExtractRequestID(resp *http.Response) string {
- return autorest.ExtractHeaderValue(HeaderRequestID, resp)
-}
-
-// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
-// azure.RequestError by reading the response body unless the response HTTP status code
-// is among the set passed.
-//
-// If there is a chance the service may return responses other than the Azure error
-// format and the response cannot be parsed into an error, a decoding error will
-// be returned containing the response body. In any case, the Responder will
-// return an error if the status code is not satisfied.
-//
-// If this Responder returns an error, the response body will be replaced with
-// an in-memory reader, which needs no further closing.
-func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
- return func(r autorest.Responder) autorest.Responder {
- return autorest.ResponderFunc(func(resp *http.Response) error {
- err := r.Respond(resp)
- if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
- var e RequestError
- defer resp.Body.Close()
-
- encodedAs := autorest.EncodedAsJSON
- if strings.Contains(resp.Header.Get("Content-Type"), "xml") {
- encodedAs = autorest.EncodedAsXML
- }
-
- // Copy and replace the Body in case it does not contain an error object.
- // This will leave the Body available to the caller.
- b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e) - resp.Body = ioutil.NopCloser(&b) - if decodeErr != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, decodeErr) - } - if e.ServiceError == nil { - // Check if error is unwrapped ServiceError - decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) - if err := decoder.Decode(&e.ServiceError); err != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, err) - } - - // for example, should the API return the literal value `null` as the response - if e.ServiceError == nil { - e.ServiceError = &ServiceError{ - Code: "Unknown", - Message: "Unknown service error", - Details: []map[string]interface{}{ - { - "HttpResponse.Body": b.String(), - }, - }, - } - } - } - - if e.ServiceError != nil && e.ServiceError.Message == "" { - // if we're here it means the returned error wasn't OData v4 compliant. - // try to unmarshal the body in hopes of getting something. - rawBody := map[string]interface{}{} - decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) - if err := decoder.Decode(&rawBody); err != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, err) - } - - e.ServiceError = &ServiceError{ - Code: "Unknown", - Message: "Unknown service error", - } - if len(rawBody) > 0 { - e.ServiceError.Details = []map[string]interface{}{rawBody} - } - } - e.Response = resp - e.RequestID = ExtractRequestID(resp) - if e.StatusCode == nil { - e.StatusCode = resp.StatusCode - } - err = &e - } - return err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go deleted file mode 100644 index 50d6f0391a..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build modhack -// +build modhack - -package cli - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. 
-// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go deleted file mode 100644 index f45c3a516d..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go +++ /dev/null @@ -1,83 +0,0 @@ -package cli - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/dimchansky/utfbom" - "github.com/mitchellh/go-homedir" -) - -// Profile represents a Profile from the Azure CLI -type Profile struct { - InstallationID string `json:"installationId"` - Subscriptions []Subscription `json:"subscriptions"` -} - -// Subscription represents a Subscription from the Azure CLI -type Subscription struct { - EnvironmentName string `json:"environmentName"` - ID string `json:"id"` - IsDefault bool `json:"isDefault"` - Name string `json:"name"` - State string `json:"state"` - TenantID string `json:"tenantId"` - User *User `json:"user"` -} - -// User represents a User from the Azure CLI -type User struct { - Name string `json:"name"` - Type string `json:"type"` -} - -const azureProfileJSON = "azureProfile.json" - -func configDir() string { - return os.Getenv("AZURE_CONFIG_DIR") -} - -// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI -func ProfilePath() (string, error) { - if cfgDir := configDir(); cfgDir != "" { - return filepath.Join(cfgDir, azureProfileJSON), nil - } - return homedir.Expand("~/.azure/" + azureProfileJSON) -} - -// LoadProfile restores a Profile object from a file located at 'path'. -func LoadProfile(path string) (result Profile, err error) { - var contents []byte - contents, err = ioutil.ReadFile(path) - if err != nil { - err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) - return - } - reader := utfbom.SkipOnly(bytes.NewReader(contents)) - - dec := json.NewDecoder(reader) - if err = dec.Decode(&result); err != nil { - err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err) - return - } - - return -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go deleted file mode 100644 index 486619111c..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go +++ /dev/null @@ -1,227 +0,0 @@ -package cli - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "os"
- "os/exec"
- "path/filepath"
- "regexp"
- "runtime"
- "strconv"
- "time"
-
- "github.com/Azure/go-autorest/autorest/adal"
- "github.com/Azure/go-autorest/autorest/date"
- "github.com/mitchellh/go-homedir"
-)
-
-// Token represents an AccessToken from the Azure CLI
-type Token struct {
- AccessToken string `json:"accessToken"`
- Authority string `json:"_authority"`
- ClientID string `json:"_clientId"`
- ExpiresOn string `json:"expiresOn"`
- IdentityProvider string `json:"identityProvider"`
- IsMRRT bool `json:"isMRRT"`
- RefreshToken string `json:"refreshToken"`
- Resource string `json:"resource"`
- TokenType string `json:"tokenType"`
- UserID string `json:"userId"`
-}
-
-const accessTokensJSON = "accessTokens.json"
-
-// ToADALToken converts an Azure CLI `Token` to an `adal.Token`
-func (t Token) ToADALToken() (converted adal.Token, err error) {
- tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn)
- if err != nil {
- err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err)
- return
- }
-
- difference := tokenExpirationDate.Sub(date.UnixEpoch())
-
- converted = adal.Token{
- AccessToken: t.AccessToken,
- Type: t.TokenType,
- ExpiresIn: "3600",
- ExpiresOn: json.Number(strconv.Itoa(int(difference.Seconds()))),
- RefreshToken: t.RefreshToken,
- Resource: t.Resource,
- }
- return
-}
-
-// AccessTokensPath returns the path where access tokens are stored from the Azure CLI
-// TODO(#199): add unit test.
-func AccessTokensPath() (string, error) {
- // Azure-CLI allows user to customize the path of access tokens through environment variable.
- if accessTokenPath := os.Getenv("AZURE_ACCESS_TOKEN_FILE"); accessTokenPath != "" {
- return accessTokenPath, nil
- }
-
- // Azure-CLI allows user to customize the path to Azure config directory through environment variable.
- if cfgDir := configDir(); cfgDir != "" {
- return filepath.Join(cfgDir, accessTokensJSON), nil
- }
-
- // Fallback logic to default path on non-cloud-shell environment.
- // TODO(#200): remove the dependency on hard-coding path.
- return homedir.Expand("~/.azure/" + accessTokensJSON)
-}
-
-// ParseExpirationDate parses either an Azure CLI or CloudShell date into a time object
-func ParseExpirationDate(input string) (*time.Time, error) {
- // CloudShell (and potentially the Azure CLI in future)
- expirationDate, cloudShellErr := time.Parse(time.RFC3339, input)
- if cloudShellErr != nil {
- // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone)
- const cliFormat = "2006-01-02 15:04:05.999999"
- expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local)
- if cliErr == nil {
- return &expirationDate, nil
- }
-
- return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr)
- }
-
- return &expirationDate, nil
-}
-
-// LoadTokens restores a set of Token objects from a file located at 'path'.
-func LoadTokens(path string) ([]Token, error) {
- file, err := os.Open(path)
- if err != nil {
- return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
- }
- defer file.Close()
-
- var tokens []Token
-
- dec := json.NewDecoder(file)
- if err = dec.Decode(&tokens); err != nil {
- return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err)
- }
-
- return tokens, nil
-}
-
-// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios.
-func GetTokenFromCLI(resource string) (*Token, error) {
- return GetTokenFromCLIWithParams(GetAccessTokenParams{Resource: resource})
-}
-
-// GetAccessTokenParams is the parameter struct of GetTokenFromCLIWithParams
-type GetAccessTokenParams struct {
- Resource string
- ResourceType string
- Subscription string
- Tenant string
-}
-
-// GetTokenFromCLIWithParams gets a token using Azure CLI 2.0 for local development scenarios.
-func GetTokenFromCLIWithParams(params GetAccessTokenParams) (*Token, error) {
- cliCmd := GetAzureCLICommand()
-
- cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json")
- if params.Resource != "" {
- if err := validateParameter(params.Resource); err != nil {
- return nil, err
- }
- cliCmd.Args = append(cliCmd.Args, "--resource", params.Resource)
- }
- if params.ResourceType != "" {
- if err := validateParameter(params.ResourceType); err != nil {
- return nil, err
- }
- cliCmd.Args = append(cliCmd.Args, "--resource-type", params.ResourceType)
- }
- if params.Subscription != "" {
- if err := validateParameter(params.Subscription); err != nil {
- return nil, err
- }
- cliCmd.Args = append(cliCmd.Args, "--subscription", params.Subscription)
- }
- if params.Tenant != "" {
- if err := validateParameter(params.Tenant); err != nil {
- return nil, err
- }
- cliCmd.Args = append(cliCmd.Args, "--tenant", params.Tenant)
- }
-
- var stderr bytes.Buffer
- cliCmd.Stderr = &stderr
-
- output, err := cliCmd.Output()
- if err != nil {
- if stderr.Len() > 0 {
- return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String())
- }
-
- return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", err.Error())
- }
-
- tokenResponse := Token{}
- err = json.Unmarshal(output, &tokenResponse)
- if err != nil {
- return nil, err
- }
-
- return &tokenResponse, err
-}
-
-func validateParameter(param string) error {
- // Validate the parameter, since it gets sent as a command line argument to Azure CLI
- const invalidResourceErrorTemplate = "Parameter %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed."
- match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", param)
- if err != nil {
- return err
- }
- if !match {
- return fmt.Errorf(invalidResourceErrorTemplate, param)
- }
- return nil
-}
-
-// GetAzureCLICommand can be used to run an arbitrary Azure CLI command
-func GetAzureCLICommand() *exec.Cmd {
- // This is the path that a developer can set to tell this package what the install path for Azure CLI is.
- const azureCLIPath = "AzureCLIPath"
-
- // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI.
- azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) - - // Default path for non-Windows. - const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin" - - // Execute Azure CLI to get token - var cliCmd *exec.Cmd - if runtime.GOOS == "windows" { - cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir"))) - cliCmd.Env = os.Environ() - cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows)) - cliCmd.Args = append(cliCmd.Args, "/c", "az") - } else { - cliCmd = exec.Command("az") - cliCmd.Env = os.Environ() - cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath)) - } - - return cliCmd -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go deleted file mode 100644 index b0a53769f2..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ /dev/null @@ -1,331 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strings" -) - -const ( - // EnvironmentFilepathName captures the name of the environment variable containing the path to the file - // to be used while populating the Azure Environment. - EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" - - // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. - NotAvailable = "N/A" -) - -var environments = map[string]Environment{ - "AZURECHINACLOUD": ChinaCloud, - "AZUREGERMANCLOUD": GermanCloud, - "AZURECLOUD": PublicCloud, - "AZUREPUBLICCLOUD": PublicCloud, - "AZUREUSGOVERNMENT": USGovernmentCloud, - "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, //TODO: deprecate -} - -// ResourceIdentifier contains a set of Azure resource IDs. -type ResourceIdentifier struct { - Graph string `json:"graph"` - KeyVault string `json:"keyVault"` - Datalake string `json:"datalake"` - Batch string `json:"batch"` - OperationalInsights string `json:"operationalInsights"` - OSSRDBMS string `json:"ossRDBMS"` - Storage string `json:"storage"` - Synapse string `json:"synapse"` - ServiceBus string `json:"serviceBus"` - SQLDatabase string `json:"sqlDatabase"` - CosmosDB string `json:"cosmosDB"` - ManagedHSM string `json:"managedHSM"` - MicrosoftGraph string `json:"microsoftGraph"` -} - -// Environment represents a set of endpoints for each of Azure's Clouds. 
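// Editorial aside (illustrative, not part of the original file): the Environment
// struct below doubles as the JSON schema consumed by EnvironmentFromFile and
// EnvironmentFromURL. A minimal custom-cloud file, with made-up endpoint values,
// only needs the fields a caller actually reads, e.g.:
//
//	{
//	  "name": "MyStackCloud",
//	  "resourceManagerEndpoint": "https://management.mystack.example/",
//	  "activeDirectoryEndpoint": "https://login.mystack.example/",
//	  "tokenAudience": "https://management.mystack.example/"
//	}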
-type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - ManagedHSMEndpoint string `json:"managedHSMEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - ServiceBusEndpoint string `json:"serviceBusEndpoint"` - BatchManagementEndpoint string `json:"batchManagementEndpoint"` - MicrosoftGraphEndpoint string `json:"microsoftGraphEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` - MariaDBDNSSuffix string `json:"mariaDBDNSSuffix"` - MySQLDatabaseDNSSuffix string `json:"mySqlDatabaseDNSSuffix"` - PostgresqlDatabaseDNSSuffix string `json:"postgresqlDatabaseDNSSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ManagedHSMDNSSuffix string `json:"managedHSMDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` - ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` - ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` - ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` - TokenAudience string `json:"tokenAudience"` - APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"` - SynapseEndpointSuffix string `json:"synapseEndpointSuffix"` - DatalakeSuffix string `json:"datalakeSuffix"` - ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` -} - -var ( - // PublicCloud is the default public Azure cloud environment - PublicCloud = Environment{ - Name: "AzurePublicCloud", - ManagementPortalURL: "https://manage.windowsazure.com/", - PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.windows.net/", - ResourceManagerEndpoint: "https://management.azure.com/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.azure.com/", - KeyVaultEndpoint: "https://vault.azure.net/", - ManagedHSMEndpoint: "https://managedhsm.azure.net/", - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.windows.net/", - BatchManagementEndpoint: "https://batch.core.windows.net/", - MicrosoftGraphEndpoint: "https://graph.microsoft.com/", - StorageEndpointSuffix: "core.windows.net", - CosmosDBDNSSuffix: "documents.azure.com", - MariaDBDNSSuffix: "mariadb.database.azure.com", - MySQLDatabaseDNSSuffix: "mysql.database.azure.com", - PostgresqlDatabaseDNSSuffix: "postgres.database.azure.com", - SQLDatabaseDNSSuffix: "database.windows.net", - TrafficManagerDNSSuffix: "trafficmanager.net", - KeyVaultDNSSuffix: "vault.azure.net", - ManagedHSMDNSSuffix: "managedhsm.azure.net", - ServiceBusEndpointSuffix: "servicebus.windows.net", - ServiceManagementVMDNSSuffix: "cloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.azure.com", - ContainerRegistryDNSSuffix: "azurecr.io", - TokenAudience: "https://management.azure.com/", - APIManagementHostNameSuffix: "azure-api.net", - SynapseEndpointSuffix: "dev.azuresynapse.net", - DatalakeSuffix: 
"azuredatalakestore.net", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.windows.net/", - KeyVault: "https://vault.azure.net", - Datalake: "https://datalake.azure.net/", - Batch: "https://batch.core.windows.net/", - OperationalInsights: "https://api.loganalytics.io", - OSSRDBMS: "https://ossrdbms-aad.database.windows.net", - Storage: "https://storage.azure.com/", - Synapse: "https://dev.azuresynapse.net", - ServiceBus: "https://servicebus.azure.net/", - SQLDatabase: "https://database.windows.net/", - CosmosDB: "https://cosmos.azure.com", - ManagedHSM: "https://managedhsm.azure.net", - MicrosoftGraph: "https://graph.microsoft.com/", - }, - } - - // USGovernmentCloud is the cloud environment for the US Government - USGovernmentCloud = Environment{ - Name: "AzureUSGovernmentCloud", - ManagementPortalURL: "https://manage.windowsazure.us/", - PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", - ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", - GalleryEndpoint: "https://gallery.usgovcloudapi.net/", - KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - ManagedHSMEndpoint: NotAvailable, - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", - BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", - MicrosoftGraphEndpoint: "https://graph.microsoft.us/", - StorageEndpointSuffix: "core.usgovcloudapi.net", - CosmosDBDNSSuffix: "documents.azure.us", - MariaDBDNSSuffix: "mariadb.database.usgovcloudapi.net", - MySQLDatabaseDNSSuffix: "mysql.database.usgovcloudapi.net", - PostgresqlDatabaseDNSSuffix: "postgres.database.usgovcloudapi.net", - SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", - TrafficManagerDNSSuffix: "usgovtrafficmanager.net", - KeyVaultDNSSuffix: "vault.usgovcloudapi.net", - ManagedHSMDNSSuffix: NotAvailable, - ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", - ServiceManagementVMDNSSuffix: "usgovcloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net", - ContainerRegistryDNSSuffix: "azurecr.us", - TokenAudience: "https://management.usgovcloudapi.net/", - APIManagementHostNameSuffix: "azure-api.us", - SynapseEndpointSuffix: "dev.azuresynapse.usgovcloudapi.net", - DatalakeSuffix: NotAvailable, - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.windows.net/", - KeyVault: "https://vault.usgovcloudapi.net", - Datalake: NotAvailable, - Batch: "https://batch.core.usgovcloudapi.net/", - OperationalInsights: "https://api.loganalytics.us", - OSSRDBMS: "https://ossrdbms-aad.database.usgovcloudapi.net", - Storage: "https://storage.azure.com/", - Synapse: "https://dev.azuresynapse.usgovcloudapi.net", - ServiceBus: "https://servicebus.azure.net/", - SQLDatabase: "https://database.usgovcloudapi.net/", - CosmosDB: "https://cosmos.azure.com", - ManagedHSM: NotAvailable, - MicrosoftGraph: "https://graph.microsoft.us/", - }, - } - - // ChinaCloud is the cloud environment operated in China - ChinaCloud = Environment{ - Name: "AzureChinaCloud", - ManagementPortalURL: "https://manage.chinacloudapi.com/", - PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", - ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", - ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", - 
GalleryEndpoint: "https://gallery.chinacloudapi.cn/", - KeyVaultEndpoint: "https://vault.azure.cn/", - ManagedHSMEndpoint: NotAvailable, - GraphEndpoint: "https://graph.chinacloudapi.cn/", - ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", - BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", - MicrosoftGraphEndpoint: "https://microsoftgraph.chinacloudapi.cn/", - StorageEndpointSuffix: "core.chinacloudapi.cn", - CosmosDBDNSSuffix: "documents.azure.cn", - MariaDBDNSSuffix: "mariadb.database.chinacloudapi.cn", - MySQLDatabaseDNSSuffix: "mysql.database.chinacloudapi.cn", - PostgresqlDatabaseDNSSuffix: "postgres.database.chinacloudapi.cn", - SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", - TrafficManagerDNSSuffix: "trafficmanager.cn", - KeyVaultDNSSuffix: "vault.azure.cn", - ManagedHSMDNSSuffix: NotAvailable, - ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", - ServiceManagementVMDNSSuffix: "chinacloudapp.cn", - ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn", - ContainerRegistryDNSSuffix: "azurecr.cn", - TokenAudience: "https://management.chinacloudapi.cn/", - APIManagementHostNameSuffix: "azure-api.cn", - SynapseEndpointSuffix: "dev.azuresynapse.azure.cn", - DatalakeSuffix: NotAvailable, - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.chinacloudapi.cn/", - KeyVault: "https://vault.azure.cn", - Datalake: NotAvailable, - Batch: "https://batch.chinacloudapi.cn/", - OperationalInsights: NotAvailable, - OSSRDBMS: "https://ossrdbms-aad.database.chinacloudapi.cn", - Storage: "https://storage.azure.com/", - Synapse: "https://dev.azuresynapse.net", - ServiceBus: "https://servicebus.azure.net/", - SQLDatabase: "https://database.chinacloudapi.cn/", - CosmosDB: "https://cosmos.azure.com", - ManagedHSM: NotAvailable, - MicrosoftGraph: "https://microsoftgraph.chinacloudapi.cn", - }, - } - - // GermanCloud is the cloud environment operated in Germany - GermanCloud = Environment{ - Name: "AzureGermanCloud", - ManagementPortalURL: "http://portal.microsoftazure.de/", - PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.cloudapi.de/", - ResourceManagerEndpoint: "https://management.microsoftazure.de/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", - GalleryEndpoint: "https://gallery.cloudapi.de/", - KeyVaultEndpoint: "https://vault.microsoftazure.de/", - ManagedHSMEndpoint: NotAvailable, - GraphEndpoint: "https://graph.cloudapi.de/", - ServiceBusEndpoint: "https://servicebus.cloudapi.de/", - BatchManagementEndpoint: "https://batch.cloudapi.de/", - MicrosoftGraphEndpoint: NotAvailable, - StorageEndpointSuffix: "core.cloudapi.de", - CosmosDBDNSSuffix: "documents.microsoftazure.de", - MariaDBDNSSuffix: "mariadb.database.cloudapi.de", - MySQLDatabaseDNSSuffix: "mysql.database.cloudapi.de", - PostgresqlDatabaseDNSSuffix: "postgres.database.cloudapi.de", - SQLDatabaseDNSSuffix: "database.cloudapi.de", - TrafficManagerDNSSuffix: "azuretrafficmanager.de", - KeyVaultDNSSuffix: "vault.microsoftazure.de", - ManagedHSMDNSSuffix: NotAvailable, - ServiceBusEndpointSuffix: "servicebus.cloudapi.de", - ServiceManagementVMDNSSuffix: "azurecloudapp.de", - ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - ContainerRegistryDNSSuffix: NotAvailable, - TokenAudience: "https://management.microsoftazure.de/", - APIManagementHostNameSuffix: NotAvailable, - SynapseEndpointSuffix: NotAvailable, - DatalakeSuffix: NotAvailable, - ResourceIdentifiers: ResourceIdentifier{ - 
Graph: "https://graph.cloudapi.de/", - KeyVault: "https://vault.microsoftazure.de", - Datalake: NotAvailable, - Batch: "https://batch.cloudapi.de/", - OperationalInsights: NotAvailable, - OSSRDBMS: "https://ossrdbms-aad.database.cloudapi.de", - Storage: "https://storage.azure.com/", - Synapse: NotAvailable, - ServiceBus: "https://servicebus.azure.net/", - SQLDatabase: "https://database.cloudapi.de/", - CosmosDB: "https://cosmos.azure.com", - ManagedHSM: NotAvailable, - MicrosoftGraph: NotAvailable, - }, - } -) - -// EnvironmentFromName returns an Environment based on the common name specified. -func EnvironmentFromName(name string) (Environment, error) { - // IMPORTANT - // As per @radhikagupta5: - // This is technical debt, fundamentally here because Kubernetes is not currently accepting - // contributions to the providers. Once that is an option, the provider should be updated to - // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation - // from this method based on the name that is provided to us. - if strings.EqualFold(name, "AZURESTACKCLOUD") { - return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) - } - - name = strings.ToUpper(name) - env, ok := environments[name] - if !ok { - return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) - } - - return env, nil -} - -// EnvironmentFromFile loads an Environment from a configuration file available on disk. -// This function is particularly useful in the Hybrid Cloud model, where one must define their own -// endpoints. -func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { - fileContents, err := ioutil.ReadFile(location) - if err != nil { - return - } - - err = json.Unmarshal(fileContents, &unmarshaled) - - return -} - -// SetEnvironment updates the environment map with the specified values. -func SetEnvironment(name string, env Environment) { - environments[strings.ToUpper(name)] = env -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go deleted file mode 100644 index 507f9e95cf..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go +++ /dev/null @@ -1,245 +0,0 @@ -package azure - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "github.com/Azure/go-autorest/autorest" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
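An editorial note on the environments.go deletion recorded above: it removes the name-keyed cloud lookup that callers used to select an endpoint set. A minimal sketch of the old call pattern, kept here for context only; the upstream, unvendored import path is an assumption:

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// The lookup is case-insensitive; unknown names return an error, and the
	// special name AZURESTACKCLOUD is loaded from AZURE_ENVIRONMENT_FILEPATH.
	env, err := azure.EnvironmentFromName("AzurePublicCloud")
	if err != nil {
		panic(err)
	}
	fmt.Println(env.ResourceManagerEndpoint)    // https://management.azure.com/
	fmt.Println(env.ContainerRegistryDNSSuffix) // azurecr.io
}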
- -type audience []string - -type authentication struct { - LoginEndpoint string `json:"loginEndpoint"` - Audiences audience `json:"audiences"` -} - -type environmentMetadataInfo struct { - GalleryEndpoint string `json:"galleryEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - PortalEndpoint string `json:"portalEndpoint"` - Authentication authentication `json:"authentication"` -} - -// EnvironmentProperty represent property names that clients can override -type EnvironmentProperty string - -const ( - // EnvironmentName ... - EnvironmentName EnvironmentProperty = "name" - // EnvironmentManagementPortalURL .. - EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" - // EnvironmentPublishSettingsURL ... - EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" - // EnvironmentServiceManagementEndpoint ... - EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" - // EnvironmentResourceManagerEndpoint ... - EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" - // EnvironmentActiveDirectoryEndpoint ... - EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" - // EnvironmentGalleryEndpoint ... - EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" - // EnvironmentKeyVaultEndpoint ... - EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" - // EnvironmentGraphEndpoint ... - EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" - // EnvironmentServiceBusEndpoint ... - EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" - // EnvironmentBatchManagementEndpoint ... - EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" - // EnvironmentStorageEndpointSuffix ... - EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" - // EnvironmentSQLDatabaseDNSSuffix ... - EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" - // EnvironmentTrafficManagerDNSSuffix ... - EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" - // EnvironmentKeyVaultDNSSuffix ... - EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" - // EnvironmentServiceBusEndpointSuffix ... - EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" - // EnvironmentServiceManagementVMDNSSuffix ... - EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" - // EnvironmentResourceManagerVMDNSSuffix ... - EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" - // EnvironmentContainerRegistryDNSSuffix ... - EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" - // EnvironmentTokenAudience ... - EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" -) - -// OverrideProperty represents property name and value that clients can override -type OverrideProperty struct { - Key EnvironmentProperty - Value string -} - -// EnvironmentFromURL loads an Environment from a URL -// This function is particularly useful in the Hybrid Cloud model, where one may define their own -// endpoints. 
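// Editorial aside (illustrative, not part of the original file): the function
// below bootstraps itself from the unauthenticated ARM metadata endpoint, i.e.
// a GET of <resourceManagerEndpoint>/metadata/endpoints?api-version=1.0, whose
// response unmarshals into the environmentMetadataInfo type above. A sketch of
// that payload, with made-up endpoint values:
//
//	{
//	  "galleryEndpoint": "https://gallery.mystack.example/",
//	  "graphEndpoint": "https://graph.mystack.example/",
//	  "portalEndpoint": "https://portal.mystack.example/",
//	  "authentication": {
//	    "loginEndpoint": "https://login.mystack.example/",
//	    "audiences": ["https://management.mystack.example/"]
//	  }
//	}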
-func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { - var metadataEnvProperties environmentMetadataInfo - - if resourceManagerEndpoint == "" { - return environment, fmt.Errorf("Metadata resource manager endpoint is empty") - } - - if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { - return environment, err - } - - // Give priority to user's override values - overrideProperties(&environment, properties) - - if environment.Name == "" { - environment.Name = "HybridEnvironment" - } - stampDNSSuffix := environment.StorageEndpointSuffix - if stampDNSSuffix == "" { - stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") - environment.StorageEndpointSuffix = stampDNSSuffix - } - if environment.KeyVaultDNSSuffix == "" { - environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) - } - if environment.KeyVaultEndpoint == "" { - environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) - } - if environment.TokenAudience == "" { - environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] - } - if environment.ActiveDirectoryEndpoint == "" { - environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint - } - if environment.ResourceManagerEndpoint == "" { - environment.ResourceManagerEndpoint = resourceManagerEndpoint - } - if environment.GalleryEndpoint == "" { - environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint - } - if environment.GraphEndpoint == "" { - environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint - } - - return environment, nil -} - -func overrideProperties(environment *Environment, properties []OverrideProperty) { - for _, property := range properties { - switch property.Key { - case EnvironmentName: - { - environment.Name = property.Value - } - case EnvironmentManagementPortalURL: - { - environment.ManagementPortalURL = property.Value - } - case EnvironmentPublishSettingsURL: - { - environment.PublishSettingsURL = property.Value - } - case EnvironmentServiceManagementEndpoint: - { - environment.ServiceManagementEndpoint = property.Value - } - case EnvironmentResourceManagerEndpoint: - { - environment.ResourceManagerEndpoint = property.Value - } - case EnvironmentActiveDirectoryEndpoint: - { - environment.ActiveDirectoryEndpoint = property.Value - } - case EnvironmentGalleryEndpoint: - { - environment.GalleryEndpoint = property.Value - } - case EnvironmentKeyVaultEndpoint: - { - environment.KeyVaultEndpoint = property.Value - } - case EnvironmentGraphEndpoint: - { - environment.GraphEndpoint = property.Value - } - case EnvironmentServiceBusEndpoint: - { - environment.ServiceBusEndpoint = property.Value - } - case EnvironmentBatchManagementEndpoint: - { - environment.BatchManagementEndpoint = property.Value - } - case EnvironmentStorageEndpointSuffix: - { - environment.StorageEndpointSuffix = property.Value - } - case EnvironmentSQLDatabaseDNSSuffix: - { - environment.SQLDatabaseDNSSuffix = property.Value - } - case EnvironmentTrafficManagerDNSSuffix: - { - environment.TrafficManagerDNSSuffix = property.Value - } - case EnvironmentKeyVaultDNSSuffix: - { - environment.KeyVaultDNSSuffix = property.Value - } - case EnvironmentServiceBusEndpointSuffix: - { - environment.ServiceBusEndpointSuffix = property.Value - } - case 
EnvironmentServiceManagementVMDNSSuffix: - { - environment.ServiceManagementVMDNSSuffix = property.Value - } - case EnvironmentResourceManagerVMDNSSuffix: - { - environment.ResourceManagerVMDNSSuffix = property.Value - } - case EnvironmentContainerRegistryDNSSuffix: - { - environment.ContainerRegistryDNSSuffix = property.Value - } - case EnvironmentTokenAudience: - { - environment.TokenAudience = property.Value - } - } - } -} - -func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { - client := autorest.NewClientWithUserAgent("") - managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") - req, _ := http.NewRequest("GET", managementEndpoint, nil) - response, err := client.Do(req) - if err != nil { - return environment, err - } - defer response.Body.Close() - jsonResponse, err := ioutil.ReadAll(response.Body) - if err != nil { - return environment, err - } - err = json.Unmarshal(jsonResponse, &environment) - return environment, err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go deleted file mode 100644 index 5b52357f95..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package azure - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" -) - -// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. -// It also handles request retries -func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := autorest.NewRetriableRequest(r) - for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - - resp, err = autorest.SendWithSender(s, rr.Request(), - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return resp, err - } - - if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { - return resp, err - } - - var re RequestError - if strings.Contains(r.Header.Get("Content-Type"), "xml") { - // XML errors (e.g. 
Storage Data Plane) only return the inner object - err = autorest.Respond(resp, autorest.ByUnmarshallingXML(&re.ServiceError)) - } else { - err = autorest.Respond(resp, autorest.ByUnmarshallingJSON(&re)) - } - - if err != nil { - return resp, err - } - err = re - - if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { - regErr := register(client, r, re) - if regErr != nil { - return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %w", regErr, err) - } - } - } - return resp, err - }) - } -} - -func getProvider(re RequestError) (string, error) { - if re.ServiceError != nil && len(re.ServiceError.Details) > 0 { - return re.ServiceError.Details[0]["target"].(string), nil - } - return "", errors.New("provider was not found in the response") -} - -func register(client autorest.Client, originalReq *http.Request, re RequestError) error { - subID := getSubscription(originalReq.URL.Path) - if subID == "" { - return errors.New("missing parameter subscriptionID to register resource provider") - } - providerName, err := getProvider(re) - if err != nil { - return fmt.Errorf("missing parameter provider to register resource provider: %s", err) - } - newURL := url.URL{ - Scheme: originalReq.URL.Scheme, - Host: originalReq.URL.Host, - } - - // taken from the resources SDK - // with almost identical code, these sections are easier to maintain - // It is also not a good idea to import the SDK here - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": autorest.Encode("path", providerName), - "subscriptionId": autorest.Encode("path", subID), - } - - const APIVersion = "2016-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - - req, err := preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - type Provider struct { - RegistrationState *string `json:"registrationState,omitempty"` - } - var provider Provider - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - // poll for registered provisioning state - registrationStartTime := time.Now() - for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { - // taken from the resources SDK - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - req, err = preparer.Prepare(&http.Request{}) - if
err != nil { - return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - if provider.RegistrationState != nil && - *provider.RegistrationState == "Registered" { - break - } - - delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) - if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { - return originalReq.Context().Err() - } - } - if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { - return errors.New("polling for resource provider registration has exceeded the polling duration") - } - return err -} - -func getSubscription(path string) string { - parts := strings.Split(path, "/") - for i, v := range parts { - if v == "subscriptions" && (i+1) < len(parts) { - return parts[i+1] - } - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go deleted file mode 100644 index bb5f9396e9..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ /dev/null @@ -1,328 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "strings" - "time" - - "github.com/Azure/go-autorest/logger" -) - -const ( - // DefaultPollingDelay is a reasonable delay between polling requests. - DefaultPollingDelay = 30 * time.Second - - // DefaultPollingDuration is a reasonable total polling duration. - DefaultPollingDuration = 15 * time.Minute - - // DefaultRetryAttempts is number of attempts for retry status codes (5xx). - DefaultRetryAttempts = 3 - - // DefaultRetryDuration is the duration to wait between retries. 
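// Editorial aside (illustrative, not part of the original file): these defaults
// feed the retry machinery that rp.go above leans on. DoRetryWithRegistration
// re-sends up to RetryAttempts times (delegating backoff to
// DoRetryForStatusCodes with RetryDuration) and only attempts provider
// registration on a 409 whose service error code is
// "MissingSubscriptionRegistration". A sketch of wiring it in, where req is an
// assumed *http.Request:
//
//	c := autorest.NewClientWithUserAgent("example")
//	resp, err := autorest.SendWithSender(
//		autorest.DecorateSender(c.Sender, azure.DoRetryWithRegistration(c)),
//		req,
//	)
//	// handle resp and err as usual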
- DefaultRetryDuration = 30 * time.Second -) - -var ( - // StatusCodesForRetry are a defined group of status codes for which the client will retry - StatusCodesForRetry = []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } -) - -const ( - requestFormat = `HTTP Request Begin =================================================== -%s -===================================================== HTTP Request End -` - responseFormat = `HTTP Response Begin =================================================== -%s -===================================================== HTTP Response End -` -) - -// Response serves as the base for all responses from generated clients. It provides access to the -// last http.Response. -type Response struct { - *http.Response `json:"-"` -} - -// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code. -// If there was no response (i.e. the underlying http.Response is nil) the return value is false. -func (r Response) IsHTTPStatus(statusCode int) bool { - if r.Response == nil { - return false - } - return r.Response.StatusCode == statusCode -} - -// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes. -// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided -// the return value is false. -func (r Response) HasHTTPStatus(statusCodes ...int) bool { - return ResponseHasStatusCode(r.Response, statusCodes...) -} - -// LoggingInspector implements request and response inspectors that log the full request and -// response to a supplied log. -type LoggingInspector struct { - Logger *log.Logger -} - -// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) WithInspection() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - var body, b bytes.Buffer - - defer r.Body.Close() - - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) - if err := r.Write(&b); err != nil { - return nil, fmt.Errorf("Failed to write request: %v", err) - } - - li.Logger.Printf(requestFormat, b.String()) - - r.Body = ioutil.NopCloser(&body) - return p.Prepare(r) - }) - } -} - -// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) ByInspecting() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - var body, b bytes.Buffer - defer resp.Body.Close() - resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) - if err := resp.Write(&b); err != nil { - return fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(responseFormat, b.String()) - - resp.Body = ioutil.NopCloser(&body) - return r.Respond(resp) - }) - } -} - -// Client is the base for autorest generated clients.
It provides default, "do nothing" -// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the -// standard, undecorated http.Client as a default Sender. -// -// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and -// return responses that compose with Response. -// -// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom -// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit -// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence -// sending the request by providing a decorated Sender. -type Client struct { - Authorizer Authorizer - Sender Sender - RequestInspector PrepareDecorator - ResponseInspector RespondDecorator - - // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header - PollingDelay time.Duration - - // PollingDuration sets the maximum polling time after which an error is returned. - // Setting this to zero will use the provided context to control the duration. - PollingDuration time.Duration - - // RetryAttempts sets the total number of times the client will attempt to make an HTTP request. - // Set the value to 1 to disable retries. DO NOT set the value to less than 1. - RetryAttempts int - - // RetryDuration sets the delay duration for retries. - RetryDuration time.Duration - - // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent - // through the Do method. - UserAgent string - - Jar http.CookieJar - - // Set to true to skip attempted registration of resource providers (false by default). - SkipResourceProviderRegistration bool - - // SendDecorators can be used to override the default chain of SendDecorators. - // This can be used to specify things like a custom retry SendDecorator. - // Set this to an empty slice to use no SendDecorators. - SendDecorators []SendDecorator -} - -// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed -// string. -func NewClientWithUserAgent(ua string) Client { - return newClient(ua, tls.RenegotiateNever) -} - -// ClientOptions contains various Client configuration options. -type ClientOptions struct { - // UserAgent is an optional user-agent string to append to the default user agent. - UserAgent string - - // Renegotiation is an optional setting to control client-side TLS renegotiation. - Renegotiation tls.RenegotiationSupport -} - -// NewClientWithOptions returns an instance of a Client with the specified values. -func NewClientWithOptions(options ClientOptions) Client { - return newClient(options.UserAgent, options.Renegotiation) -} - -func newClient(ua string, renegotiation tls.RenegotiationSupport) Client { - c := Client{ - PollingDelay: DefaultPollingDelay, - PollingDuration: DefaultPollingDuration, - RetryAttempts: DefaultRetryAttempts, - RetryDuration: DefaultRetryDuration, - UserAgent: UserAgent(), - } - c.Sender = c.sender(renegotiation) - c.AddToUserAgent(ua) - return c -} - -// AddToUserAgent adds an extension to the current user agent -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) -} - -// Do implements the Sender interface by invoking the active Sender after applying authorization. 
-// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent -// is set, apply the User-Agent header. -func (c Client) Do(r *http.Request) (*http.Response, error) { - if r.UserAgent() == "" { - r, _ = Prepare(r, - WithUserAgent(c.UserAgent)) - } - // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations - r, err := Prepare(r, - c.WithAuthorization(), - c.WithInspection()) - if err != nil { - var resp *http.Response - if detErr, ok := err.(DetailedError); ok { - // if the authorization failed (e.g. invalid credentials) there will - // be a response associated with the error, be sure to return it. - resp = detErr.Response - } - return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") - } - logger.Instance.WriteRequest(r, logger.Filter{ - Header: func(k string, v []string) (bool, []string) { - // remove the auth token from the log - if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { - v = []string{"**REDACTED**"} - } - return true, v - }, - }) - resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r) - if resp == nil && err == nil { - err = errors.New("autorest: received nil response and error") - } - logger.Instance.WriteResponse(resp, logger.Filter{}) - Respond(resp, c.ByInspecting()) - return resp, err -} - -// sender returns the Sender to which to send requests. -func (c Client) sender(renegotiation tls.RenegotiationSupport) Sender { - if c.Sender == nil { - return sender(renegotiation) - } - return c.Sender -} - -// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator -// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer. -func (c Client) WithAuthorization() PrepareDecorator { - return c.authorizer().WithAuthorization() -} - -// authorizer returns the Authorizer to use. -func (c Client) authorizer() Authorizer { - if c.Authorizer == nil { - return NullAuthorizer{} - } - return c.Authorizer -} - -// WithInspection is a convenience method that passes the request to the supplied RequestInspector, -// if present, or returns the WithNothing PrepareDecorator otherwise. -func (c Client) WithInspection() PrepareDecorator { - if c.RequestInspector == nil { - return WithNothing() - } - return c.RequestInspector -} - -// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, -// if present, or returns the ByIgnoring RespondDecorator otherwise. -func (c Client) ByInspecting() RespondDecorator { - if c.ResponseInspector == nil { - return ByIgnoring() - } - return c.ResponseInspector -} - -// Send sends the provided http.Request using the client's Sender or the default sender. -// It returns the http.Response and a possible error. It also accepts a, possibly empty, -// default set of SendDecorators used when sending the request. -// SendDecorators have the following precedence: -// 1. In a request's context via WithSendDecorators() -// 2. Specified on the client in SendDecorators -// 3. The default values specified in this method -func (c Client) Send(req *http.Request, decorators ...SendDecorator) (*http.Response, error) { - if c.SendDecorators != nil { - decorators = c.SendDecorators - } - inCtx := req.Context().Value(ctxSendDecorators{}) - if sd, ok := inCtx.([]SendDecorator); ok { - decorators = sd - } - return SendWithSender(c, req, decorators...)
-} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. 
- - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
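To close out the client.go removal recorded above: a minimal sketch of how callers configured the deleted autorest.Client. This is illustrative only; the URL is a placeholder, the unvendored import path is assumed, and a real caller would attach an Authorizer rather than rely on the NullAuthorizer default.

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	c := autorest.NewClientWithUserAgent("ko-example") // extends the default user agent
	c.RetryAttempts = 5                                // attempts for the retryable codes (408/429/5xx)
	c.PollingDuration = 0                              // zero defers polling limits to the request context

	req, err := http.NewRequest(http.MethodGet, "https://management.azure.com/", nil)
	if err != nil {
		panic(err)
	}
	// Do applies the Authorizer (NullAuthorizer when unset), inspectors, and UA header.
	resp, err := c.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}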
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go deleted file mode 100644 index c457106568..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/date.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) -defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of -time.Time types. And both convert to time.Time through a ToTime method. -*/ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "time" -) - -const ( - fullDate = "2006-01-02" - fullDateJSON = `"2006-01-02"` - dateFormat = "%04d-%02d-%02d" - jsonFormat = `"%04d-%02d-%02d"` -) - -// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., -// 2006-01-02). -type Date struct { - time.Time -} - -// ParseDate create a new Date from the passed string. -func ParseDate(date string) (d Date, err error) { - return parseDate(date, fullDate) -} - -func parseDate(date string, format string) (Date, error) { - d, err := time.Parse(format, date) - return Date{Time: d}, err -} - -// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalBinary() ([]byte, error) { - return d.MarshalText() -} - -// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalBinary(data []byte) error { - return d.UnmarshalText(data) -} - -// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalJSON() (json []byte, err error) { - return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalJSON(data []byte) (err error) { - d.Time, err = time.Parse(fullDateJSON, string(data)) - return err -} - -// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalText() (text []byte, err error) { - return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalText(data []byte) (err error) { - d.Time, err = time.Parse(fullDate, string(data)) - return err -} - -// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). 
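// Editorial aside (illustrative, not part of the original file): because the
// marshal methods above all emit the bare full-date layout, a Date field
// round-trips through JSON as a plain "YYYY-MM-DD" string:
//
//	d, _ := date.ParseDate("2024-05-14")
//	b, _ := json.Marshal(struct{ D date.Date }{d})
//	fmt.Println(string(b)) // {"D":"2024-05-14"}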
-func (d Date) String() string { - return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) -} - -// ToTime returns a Date as a time.Time -func (d Date) ToTime() time.Time { - return d.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go deleted file mode 100644 index 4e05432071..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go deleted file mode 100644 index b453fad049..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/time.go +++ /dev/null @@ -1,103 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "regexp" - "time" -) - -// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -const ( - azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` - azureUtcFormat = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` - rfc3339 = time.RFC3339Nano - tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` -) - -// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -type Time struct { - time.Time -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalBinary() ([]byte, error) { - return t.Time.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). 
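The azureUtcFormat constants above exist because Azure sometimes omits the 'Z' zone suffix; a hedged sketch (timestamps invented) of how the removed date.Time wrapper absorbs both spellings:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	var t date.Time

	// No zone suffix: parsed with the Azure UTC layout.
	_ = json.Unmarshal([]byte(`"2001-02-03T04:05:06"`), &t)
	fmt.Println(t.ToTime().UTC()) // 2001-02-03 04:05:06 +0000 UTC

	// Explicit offset: parsed as RFC3339.
	_ = json.Unmarshal([]byte(`"2001-02-03T04:05:06+01:00"`), &t)
	fmt.Println(t.ToTime().UTC()) // 2001-02-03 03:05:06 +0000 UTC
}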
-func (t Time) MarshalJSON() (json []byte, err error) { - return t.Time.MarshalJSON() -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalJSON(data []byte) (err error) { - timeFormat := azureUtcFormatJSON - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339JSON - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalText() (text []byte, err error) { - return t.Time.MarshalText() -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalText(data []byte) (err error) { - timeFormat := azureUtcFormat - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339 - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// String returns the Time formatted as an RFC3339 date-time string (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) String() string { - // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} - -// ToTime returns a Time as a time.Time -func (t Time) ToTime() time.Time { - return t.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go deleted file mode 100644 index 48fb39ba9b..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go +++ /dev/null @@ -1,100 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "time" -) - -const ( - rfc1123JSON = `"` + time.RFC1123 + `"` - rfc1123 = time.RFC1123 -) - -// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -type TimeRFC1123 struct { - time.Time -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123JSON, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). 
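A short editorial sketch of the removed TimeRFC1123 wrapper, which speaks the RFC1123 wire format rather than time.Time's native RFC3339 (the timestamp is arbitrary):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	var t date.TimeRFC1123
	if err := json.Unmarshal([]byte(`"Mon, 02 Jan 2006 15:04:05 MST"`), &t); err != nil {
		panic(err)
	}
	b, _ := json.Marshal(t)
	fmt.Println(string(b)) // "Mon, 02 Jan 2006 15:04:05 MST"
}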
-func (t TimeRFC1123) MarshalJSON() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") - } - b := []byte(t.Format(rfc1123JSON)) - return b, nil -} - -// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalText() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") - } - - b := []byte(t.Format(rfc1123)) - return b, nil -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalBinary() ([]byte, error) { - return t.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// ToTime returns a Time as a time.Time -func (t TimeRFC1123) ToTime() time.Time { - return t.Time -} - -// String returns the Time formatted as an RFC1123 date-time string (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) String() string { - // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go deleted file mode 100644 index 7073959b2a..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go +++ /dev/null @@ -1,123 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "time" -) - -// unixEpoch is the moment in time that should be treated as timestamp 0. -var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) - -// UnixTime marshals and unmarshals a time that is represented as the number -// of seconds (ignoring skip-seconds) since the Unix Epoch. -type UnixTime time.Time - -// Duration returns the time as a Duration since the UnixEpoch. -func (t UnixTime) Duration() time.Duration { - return time.Time(t).Sub(unixEpoch) -} - -// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. 
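An editorial sketch of the removed UnixTime type, which travels as a JSON number of possibly fractional seconds since the epoch; the value 42.25 is arbitrary:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	var ut date.UnixTime
	_ = json.Unmarshal([]byte(`42.25`), &ut)

	fmt.Println(time.Time(ut).UTC()) // 1970-01-01 00:00:42.25 +0000 UTC
	fmt.Println(ut.Duration())       // 42.25s
}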
-func NewUnixTimeFromSeconds(seconds float64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) -} - -// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. -func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(nanoseconds)) -} - -// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. -func NewUnixTimeFromDuration(dur time.Duration) UnixTime { - return UnixTime(unixEpoch.Add(dur)) -} - -// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' -func UnixEpoch() time.Time { - return unixEpoch -} - -// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. -// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) -func (t UnixTime) MarshalJSON() ([]byte, error) { - buffer := &bytes.Buffer{} - enc := json.NewEncoder(buffer) - err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) - if err != nil { - return nil, err - } - return buffer.Bytes(), nil -} - -// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since -// midnight January 1st, 1970. -func (t *UnixTime) UnmarshalJSON(text []byte) error { - dec := json.NewDecoder(bytes.NewReader(text)) - - var secondsSinceEpoch float64 - if err := dec.Decode(&secondsSinceEpoch); err != nil { - return err - } - - *t = NewUnixTimeFromSeconds(secondsSinceEpoch) - - return nil -} - -// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. -func (t UnixTime) MarshalText() ([]byte, error) { - cast := time.Time(t) - return cast.MarshalText() -} - -// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. -func (t *UnixTime) UnmarshalText(raw []byte) error { - var unmarshaled time.Time - - if err := unmarshaled.UnmarshalText(raw); err != nil { - return err - } - - *t = UnixTime(unmarshaled) - return nil -} - -// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. -func (t UnixTime) MarshalBinary() ([]byte, error) { - buf := &bytes.Buffer{} - - payload := int64(t.Duration()) - - if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. -func (t *UnixTime) UnmarshalBinary(raw []byte) error { - var nanosecondsSinceEpoch int64 - - if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { - return err - } - *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go deleted file mode 100644 index 12addf0ebb..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go +++ /dev/null @@ -1,25 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "strings" - "time" -) - -// ParseTime to parse Time string to specified format. -func ParseTime(format string, t string) (d time.Time, err error) { - return time.Parse(format, strings.ToUpper(t)) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go deleted file mode 100644 index 35098eda8e..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/error.go +++ /dev/null @@ -1,103 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" -) - -const ( - // UndefinedStatusCode is used when HTTP status code is not available for an error. - UndefinedStatusCode = 0 -) - -// DetailedError encloses a error with details of the package, method, and associated HTTP -// status code (if any). -type DetailedError struct { - Original error - - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced the the '%T' format specifier of the fmt package. For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. - StatusCode interface{} - - // Message is the error message. - Message string - - // Service Error is the response body of failed API in bytes - ServiceError []byte - - // Response is the response object that was returned during failure if applicable. - Response *http.Response -} - -// NewError creates a new Error conforming object from the passed packageType, method, and -// message. message is treated as a format string to which the optional args apply. -func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, nil, message, args...) -} - -// NewErrorWithResponse creates a new Error conforming object from the passed -// packageType, method, statusCode of the given resp (UndefinedStatusCode if -// resp is nil), and message. message is treated as a format string to which the -// optional args apply. -func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, resp, message, args...) 
-} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - if v, ok := original.(DetailedError); ok { - return v - } - - statusCode := UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - - return DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - Response: resp, - } -} - -// Error returns a formatted containing all available details (i.e., PackageType, Method, -// StatusCode, Message, and original error (if any)). -func (e DetailedError) Error() string { - if e.Original == nil { - return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) - } - return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) -} - -// Unwrap returns the original error. -func (e DetailedError) Unwrap() error { - return e.Original -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go deleted file mode 100644 index 792f82d4b6..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build modhack -// +build modhack - -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go deleted file mode 100644 index 121a66fa88..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ /dev/null @@ -1,549 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
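The DetailedError type above wraps a failure with its package, method, and status code while preserving the original error; a hedged sketch of typical construction and unwrapping (the package, method, and message are invented):

package main

import (
	"errors"
	"fmt"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	cause := errors.New("connection reset")
	err := autorest.NewErrorWithError(cause, "examplepkg", "GetWidget", nil, "failed after %d attempts", 3)

	// With a nil response, the status code falls back to UndefinedStatusCode (0).
	fmt.Println(err.Error())
	// examplepkg#GetWidget: failed after 3 attempts: StatusCode=0 -- Original Error: connection reset

	fmt.Println(errors.Is(err, cause)) // true, via DetailedError.Unwrap
}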
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "context" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/url" - "strings" -) - -const ( - mimeTypeJSON = "application/json" - mimeTypeOctetStream = "application/octet-stream" - mimeTypeFormPost = "application/x-www-form-urlencoded" - - headerAuthorization = "Authorization" - headerAuxAuthorization = "x-ms-authorization-auxiliary" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" -) - -// used as a key type in context.WithValue() -type ctxPrepareDecorators struct{} - -// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. -// If no PrepareDecorators are provided the context is unchanged. -func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { - if len(prepareDecorator) == 0 { - return ctx - } - return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) -} - -// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. -func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator { - inCtx := ctx.Value(ctxPrepareDecorators{}) - if pd, ok := inCtx.([]PrepareDecorator); ok { - return pd - } - return defaultPrepareDecorators -} - -// Preparer is the interface that wraps the Prepare method. -// -// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations -// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. -type Preparer interface { - Prepare(*http.Request) (*http.Request, error) -} - -// PreparerFunc is a method that implements the Preparer interface. -type PreparerFunc func(*http.Request) (*http.Request, error) - -// Prepare implements the Preparer interface on PreparerFunc. -func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { - return pf(r) -} - -// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then affect the result. -type PrepareDecorator func(Preparer) Preparer - -// CreatePreparer creates, decorates, and returns a Preparer. -// Without decorators, the returned Preparer returns the passed http.Request unmodified. -// Preparers are safe to share and re-use. -func CreatePreparer(decorators ...PrepareDecorator) Preparer { - return DecoratePreparer( - Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), - decorators...) -} - -// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it -// applies to the Preparer. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (change the http.Request and then pass it -// along) or a post-decorator (pass the http.Request along and alter it on return). -func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { - for _, decorate := range decorators { - p = decorate(p) - } - return p -} - -// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. -// It creates a Preparer from the decorators which it then applies to the passed http.Request. 
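Before the Prepare entry point below, a hedged sketch of the decorator pattern it drives: a hand-rolled post-decorator (the X-Trace-Id header and helper name are invented) composed via Prepare:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// withTraceID is a post-decorator: it lets the inner Preparer run first,
// then alters the prepared request on the way back out.
func withTraceID(id string) autorest.PrepareDecorator {
	return func(p autorest.Preparer) autorest.Preparer {
		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
			r, err := p.Prepare(r)
			if err == nil {
				r.Header.Set("X-Trace-Id", id)
			}
			return r, err
		})
	}
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.com/widgets", nil)
	req, err := autorest.Prepare(req, withTraceID("abc-123"))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("X-Trace-Id")) // abc-123
}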
-func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { - if r == nil { - return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") - } - return CreatePreparer(decorators...).Prepare(r) -} - -// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed -// http.Request. -func WithNothing() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - return p.Prepare(r) - }) - } -} - -// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to -// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before -// adding the header. -func WithHeader(header string, value string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - setHeader(r, http.CanonicalHeaderKey(header), value) - } - return r, err - }) - } -} - -// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to -// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before -// adding them. -func WithHeaders(headers map[string]interface{}) PrepareDecorator { - h := ensureValueStrings(headers) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - - for name, value := range h { - r.Header.Set(http.CanonicalHeaderKey(name), value) - } - } - return r, err - }) - } -} - -// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the supplied token. -func WithBearerAuthorization(token string) PrepareDecorator { - return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) -} - -// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value -// is the passed contentType. -func AsContentType(contentType string) PrepareDecorator { - return WithHeader(headerContentType, contentType) -} - -// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the -// passed string. -func WithUserAgent(ua string) PrepareDecorator { - return WithHeader(headerUserAgent, ua) -} - -// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/x-www-form-urlencoded". -func AsFormURLEncoded() PrepareDecorator { - return AsContentType(mimeTypeFormPost) -} - -// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/json". -func AsJSON() PrepareDecorator { - return AsContentType(mimeTypeJSON) -} - -// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. -func AsOctetStream() PrepareDecorator { - return AsContentType(mimeTypeOctetStream) -} - -// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The -// decorator does not validate that the passed method string is a known HTTP method. 
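A hedged sketch composing the stock header decorators above; the token and user-agent string are placeholders:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, err := autorest.Prepare(&http.Request{Header: http.Header{}},
		autorest.AsJSON(),
		autorest.WithUserAgent("example-client/0.1"),
		autorest.WithBearerAuthorization("example-token"))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Content-Type"))  // application/json
	fmt.Println(req.Header.Get("Authorization")) // Bearer example-token
}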
-func WithMethod(method string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r.Method = method - return p.Prepare(r) - }) - } -} - -// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. -func AsDelete() PrepareDecorator { return WithMethod("DELETE") } - -// AsGet returns a PrepareDecorator that sets the HTTP method to GET. -func AsGet() PrepareDecorator { return WithMethod("GET") } - -// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. -func AsHead() PrepareDecorator { return WithMethod("HEAD") } - -// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. -func AsMerge() PrepareDecorator { return WithMethod("MERGE") } - -// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. -func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } - -// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. -func AsPatch() PrepareDecorator { return WithMethod("PATCH") } - -// AsPost returns a PrepareDecorator that sets the HTTP method to POST. -func AsPost() PrepareDecorator { return WithMethod("POST") } - -// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. -func AsPut() PrepareDecorator { return WithMethod("PUT") } - -// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed -// from the supplied baseUrl. Query parameters will be encoded as required. -func WithBaseURL(baseURL string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var u *url.URL - if u, err = url.Parse(baseURL); err != nil { - return r, err - } - if u.Scheme == "" { - return r, fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) - } - if u.RawQuery != "" { - // handle unencoded semicolons (ideally the server would send them already encoded) - u.RawQuery = strings.Replace(u.RawQuery, ";", "%3B", -1) - q, err := url.ParseQuery(u.RawQuery) - if err != nil { - return r, err - } - u.RawQuery = q.Encode() - } - r.URL = u - } - return r, err - }) - } -} - -// WithBytes returns a PrepareDecorator that takes a list of bytes -// which passes the bytes directly to the body -func WithBytes(input *[]byte) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if input == nil { - return r, fmt.Errorf("Input Bytes was nil") - } - - r.ContentLength = int64(len(*input)) - r.Body = ioutil.NopCloser(bytes.NewReader(*input)) - } - return r, err - }) - } -} - -// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the -// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. -func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(urlParameters) - for key, value := range parameters { - baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) - } - return WithBaseURL(baseURL) -} - -// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the -// http.Request body. 
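A sketch of the method and templated base-URL decorators above; the account name and host are invented:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsPost(),
		autorest.WithCustomBaseURL("https://{account}.blob.example.com",
			map[string]interface{}{"account": "demoaccount"}))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.String()) // POST https://demoaccount.blob.example.com
}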
-func WithFormData(v url.Values) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - s := v.Encode() - - setHeader(r, http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost) - r.ContentLength = int64(len(s)) - r.Body = ioutil.NopCloser(strings.NewReader(s)) - } - return r, err - }) - } -} - -// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters -// into the http.Request body. -func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var body bytes.Buffer - writer := multipart.NewWriter(&body) - for key, value := range formDataParameters { - if rc, ok := value.(io.ReadCloser); ok { - var fd io.Writer - if fd, err = writer.CreateFormFile(key, key); err != nil { - return r, err - } - if _, err = io.Copy(fd, rc); err != nil { - return r, err - } - } else { - if err = writer.WriteField(key, ensureValueString(value)); err != nil { - return r, err - } - } - } - if err = writer.Close(); err != nil { - return r, err - } - setHeader(r, http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) - r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - r.ContentLength = int64(body.Len()) - return r, err - } - return r, err - }) - } -} - -// WithFile returns a PrepareDecorator that sends file in request body. -func WithFile(f io.ReadCloser) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := ioutil.ReadAll(f) - if err != nil { - return r, err - } - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - r.ContentLength = int64(len(b)) - } - return r, err - }) - } -} - -// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request -// and sets the Content-Length header. -func WithBool(v bool) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the -// request and sets the Content-Length header. -func WithFloat32(v float32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the -// request and sets the Content-Length header. -func WithFloat64(v float64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request -// and sets the Content-Length header. -func WithInt32(v int32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request -// and sets the Content-Length header. -func WithInt64(v int64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithString returns a PrepareDecorator that encodes the passed string into the body of the request -// and sets the Content-Length header. 
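A sketch of WithFormData preparing a URL-encoded body and its Content-Type header; the endpoint and field are invented:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	v := url.Values{}
	v.Set("grant_type", "client_credentials")

	req, err := autorest.Prepare(&http.Request{Header: http.Header{}},
		autorest.AsPost(),
		autorest.WithBaseURL("https://login.example.com/oauth2/token"),
		autorest.WithFormData(v))
	if err != nil {
		panic(err)
	}

	fmt.Println(req.Header.Get("Content-Type")) // application/x-www-form-urlencoded
	body, _ := io.ReadAll(req.Body)
	fmt.Println(string(body)) // grant_type=client_credentials
}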
-func WithString(v string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - r.ContentLength = int64(len(v)) - r.Body = ioutil.NopCloser(strings.NewReader(v)) - } - return r, err - }) - } -} - -// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the -// request and sets the Content-Length header. -func WithJSON(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := json.Marshal(v) - if err == nil { - r.ContentLength = int64(len(b)) - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - } - return r, err - }) - } -} - -// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the -// request and sets the Content-Length header. -func WithXML(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := xml.Marshal(v) - if err == nil { - // we have to tack on an XML header - withHeader := xml.Header + string(b) - bytesWithHeader := []byte(withHeader) - - r.ContentLength = int64(len(bytesWithHeader)) - setHeader(r, headerContentLength, fmt.Sprintf("%d", len(bytesWithHeader))) - r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) - } - } - return r, err - }) - } -} - -// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path -// is absolute (that is, it begins with a "/"), it replaces the existing path. -func WithPath(path string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPath", "Invoked with a nil URL") - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The -// values will be escaped (aka URL encoded) before insertion into the path. -func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := escapeValueStrings(ensureValueStrings(pathParameters)) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
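A sketch tying the body and path decorators together; the widget type, host, and path are invented:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

type widget struct {
	Name string `json:"name"`
}

func main() {
	req, err := autorest.Prepare(&http.Request{Header: http.Header{}},
		autorest.AsPut(),
		autorest.AsJSON(),
		autorest.WithBaseURL("https://api.example.com"),
		autorest.WithPath("/widgets/42"),
		autorest.WithJSON(widget{Name: "sprocket"}))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.String()) // PUT https://api.example.com/widgets/42
}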
-func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(pathParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -func parseURL(u *url.URL, path string) (*url.URL, error) { - p := strings.TrimRight(u.String(), "/") - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return url.Parse(p + path) -} - -// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters -// given in the supplied map (i.e., key=value). -func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { - parameters := MapToValues(queryParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") - } - v := r.URL.Query() - for key, value := range parameters { - for i := range value { - d, err := url.QueryUnescape(value[i]) - if err != nil { - return r, err - } - value[i] = d - } - v[key] = value - } - r.URL.RawQuery = v.Encode() - } - return r, err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go deleted file mode 100644 index 349e1963a2..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ /dev/null @@ -1,269 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// Responder is the interface that wraps the Respond method. -// -// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold -// state since Responders may be shared and re-used. -type Responder interface { - Respond(*http.Response) error -} - -// ResponderFunc is a method that implements the Responder interface. -type ResponderFunc func(*http.Response) error - -// Respond implements the Responder interface on ResponderFunc. -func (rf ResponderFunc) Respond(r *http.Response) error { - return rf(r) -} - -// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to -// the http.Response and pass it along or, first, pass the http.Response along then react. -type RespondDecorator func(Responder) Responder - -// CreateResponder creates, decorates, and returns a Responder. 
Without decorators, the returned -// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share -// and re-used: It depends on the applied decorators. For example, a standard decorator that closes -// the response body is fine to share whereas a decorator that reads the body into a passed struct -// is not. -// -// To prevent memory leaks, ensure that at least one Responder closes the response body. -func CreateResponder(decorators ...RespondDecorator) Responder { - return DecorateResponder( - Responder(ResponderFunc(func(r *http.Response) error { return nil })), - decorators...) -} - -// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it -// applies to the Responder. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (react to the http.Response and then pass it -// along) or a post-decorator (pass the http.Response along and then react). -func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { - for _, decorate := range decorators { - r = decorate(r) - } - return r -} - -// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. -// It creates a Responder from the decorators it then applies to the passed http.Response. -func Respond(r *http.Response, decorators ...RespondDecorator) error { - if r == nil { - return nil - } - return CreateResponder(decorators...).Respond(r) -} - -// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined -// to the next RespondDecorator. -func ByIgnoring() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - return r.Respond(resp) - }) - } -} - -// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as -// the Body is read. -func ByCopying(b *bytes.Buffer) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - resp.Body = TeeReadCloser(resp.Body, b) - } - return err - }) - } -} - -// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which -// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed -// Responder is invoked prior to discarding the response body, the decorator may occur anywhere -// within the set. -func ByDiscardingBody() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { - return fmt.Errorf("Error discarding the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it -// closes the response body. Since the passed Responder is invoked prior to closing the response -// body, the decorator may occur anywhere within the set. 
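The Responder half mirrors the Preparer half; a hedged sketch draining and closing a response through the stock decorators above (the URL is invented):

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	resp, err := http.Get("https://example.com/healthz")
	if err != nil {
		panic(err)
	}
	// ByDiscardingBody drains the body; ByClosing, as the outermost
	// decorator, closes it afterwards.
	err = autorest.Respond(resp,
		autorest.ByDiscardingBody(),
		autorest.ByClosing())
	fmt.Println(err)
}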
-func ByClosing() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which -// it closes the response if the passed Responder returns an error and the response body exists. -func ByClosingIfError() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err != nil && resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingBytes(v *[]byte) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - bytes, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - *v = bytes - } - } - return err - }) - } -} - -// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingJSON(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - // Some responses might include a BOM, remove for successful unmarshalling - b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else if len(strings.Trim(string(b), " ")) > 0 { - errInner = json.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingXML(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - errInner = xml.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response -// StatusCode is among the set passed. On error, response body is fully read into a buffer and -// presented in the returned error, as well as in the response body. -func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !ResponseHasStatusCode(resp, codes...) 
{ - derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - if resp.Body != nil { - defer resp.Body.Close() - b, _ := ioutil.ReadAll(resp.Body) - derr.ServiceError = b - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - err = derr - } - return err - }) - } -} - -// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is -// anything other than HTTP 200. -func WithErrorUnlessOK() RespondDecorator { - return WithErrorUnlessStatusCode(http.StatusOK) -} - -// ExtractHeader extracts all values of the specified header from the http.Response. It returns an -// empty string slice if the passed http.Response is nil or the header does not exist. -func ExtractHeader(header string, resp *http.Response) []string { - if resp != nil && resp.Header != nil { - return resp.Header[http.CanonicalHeaderKey(header)] - } - return nil -} - -// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It -// returns an empty string if the passed http.Response is nil or the header does not exist. -func ExtractHeaderValue(header string, resp *http.Response) string { - h := ExtractHeader(header, resp) - if len(h) > 0 { - return h[0] - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go deleted file mode 100644 index fa11dbed79..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go +++ /dev/null @@ -1,52 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. -func NewRetriableRequest(req *http.Request) *RetriableRequest { - return &RetriableRequest{req: req} -} - -// Request returns the wrapped HTTP request. 
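A sketch of the usual respond pipeline: fail on an unexpected status (the body is preserved in DetailedError.ServiceError), otherwise decode JSON and close; the URL is invented:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	resp, err := http.Get("https://api.example.com/widgets/42")
	if err != nil {
		panic(err)
	}
	var out map[string]interface{}
	err = autorest.Respond(resp,
		autorest.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&out),
		autorest.ByClosing())
	fmt.Println(out, err)
}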
-func (rr *RetriableRequest) Request() *http.Request { - return rr.req -} - -func (rr *RetriableRequest) prepareFromByteReader() (err error) { - // fall back to making a copy (only do this once) - b := []byte{} - if rr.req.ContentLength > 0 { - b = make([]byte, rr.req.ContentLength) - _, err = io.ReadFull(rr.req.Body, b) - if err != nil { - return err - } - } else { - b, err = ioutil.ReadAll(rr.req.Body) - if err != nil { - return err - } - } - rr.br = bytes.NewReader(b) - rr.req.Body = ioutil.NopCloser(rr.br) - return err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go deleted file mode 100644 index 4c87030e81..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build !go1.8 -// +build !go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. -type RetriableRequest struct { - req *http.Request - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. -func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the reqeust body - if rr.req.Body != nil { - if rr.br != nil { - _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.ContentLength = 0 -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go deleted file mode 100644 index 05847c08ba..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go +++ /dev/null @@ -1,67 +0,0 @@ -//go:build go1.8 -// +build go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. 
-type RetriableRequest struct { - req *http.Request - rc io.ReadCloser - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. -func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the reqeust body - if rr.req.Body != nil { - if rr.rc != nil { - rr.req.Body = rr.rc - } else if rr.br != nil { - _, err = rr.br.Seek(0, io.SeekStart) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.req.GetBody != nil { - // this will allow us to preserve the body without having to - // make a copy. note we need to do this on each iteration - rr.rc, err = rr.req.GetBody() - if err != nil { - return err - } - } else if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.GetBody = nil - req.ContentLength = 0 -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go deleted file mode 100644 index 118de81411..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ /dev/null @@ -1,458 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "crypto/tls" - "fmt" - "log" - "math" - "net" - "net/http" - "net/http/cookiejar" - "strconv" - "sync" - "time" - - "github.com/Azure/go-autorest/logger" - "github.com/Azure/go-autorest/tracing" -) - -// there is one sender per TLS renegotiation type, i.e. count of tls.RenegotiationSupport enums -const defaultSendersCount = 3 - -type defaultSender struct { - sender Sender - init *sync.Once -} - -// each type of sender will be created on demand in sender() -var defaultSenders [defaultSendersCount]defaultSender - -func init() { - for i := 0; i < defaultSendersCount; i++ { - defaultSenders[i].init = &sync.Once{} - } -} - -// used as a key type in context.WithValue() -type ctxSendDecorators struct{} - -// WithSendDecorators adds the specified SendDecorators to the provided context. -// If no SendDecorators are provided the context is unchanged. -func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context { - if len(sendDecorator) == 0 { - return ctx - } - return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator) -} - -// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators. -func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator { - inCtx := ctx.Value(ctxSendDecorators{}) - if sd, ok := inCtx.([]SendDecorator); ok { - return sd - } - return defaultSendDecorators -} - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. 
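A hedged sketch of the retry plumbing above: Prepare rewinds (or, on Go 1.8+, re-creates via GetBody) the request body before each resend; the endpoint, payload, and retry policy are all invented for illustration:

package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest(http.MethodPost, "https://api.example.com/widgets",
		strings.NewReader(`{"name":"sprocket"}`))
	rr := autorest.NewRetriableRequest(req)

	var resp *http.Response
	var err error
	for attempt := 1; attempt <= 3; attempt++ {
		// Prepare restores the body so the request can be sent again.
		if err = rr.Prepare(); err != nil {
			break
		}
		if resp, err = http.DefaultClient.Do(rr.Request()); err != nil {
			continue // transport error; body is preserved for the next attempt
		}
		if resp.StatusCode < http.StatusInternalServerError {
			break // success or a non-retriable client error
		}
		resp.Body.Close() // 5xx: discard this response and retry
	}
	if err == nil && resp != nil {
		defer resp.Body.Close()
		fmt.Println(resp.Status)
	}
}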
-type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(sender(tls.RenegotiateNever), decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to -// the Sender. Decorators are applied in the order received, but their effect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). -func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -// Send sends, by means of the default http.Client, the passed http.Request, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply to the http.Client before invoking the Do method. -// -// Send is a convenience method and not recommended for production. Advanced users should use -// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). -// -// Send will not poll or retry requests. -func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(sender(tls.RenegotiateNever), r, decorators...) -} - -// SendWithSender sends the passed http.Request, through the provided Sender, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply to the http.Client before invoking the Do method. -// -// SendWithSender will not poll or retry requests. -func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return DecorateSender(s, decorators...).Do(r) -} - -func sender(renengotiation tls.RenegotiationSupport) Sender { - // note that we can't init defaultSenders in init() since it will - // execute before calling code has had a chance to enable tracing - defaultSenders[renengotiation].init.Do(func() { - // copied from http.DefaultTransport with a TLS minimum version.
- transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - Renegotiation: renengotiation, - }, - } - var roundTripper http.RoundTripper = transport - if tracing.IsEnabled() { - roundTripper = tracing.NewTransport(transport) - } - j, _ := cookiejar.New(nil) - defaultSenders[renengotiation].sender = &http.Client{Jar: j, Transport: roundTripper} - }) - return defaultSenders[renengotiation].sender -} - -// AfterDelay returns a SendDecorator that delays for the passed time.Duration before -// invoking the Sender. The delay may be terminated by closing the optional channel on the -// http.Request. If canceled, no further Senders are invoked. -func AfterDelay(d time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if !DelayForBackoff(d, 0, r.Context().Done()) { - return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") - } - return s.Do(r) - }) - } -} - -// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. -func AsIs() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return s.Do(r) - }) - } -} - -// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which -// it closes the response if the passed Sender returns an error and the response body exists. -func DoCloseIfError() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - Respond(resp, ByDiscardingBody(), ByClosing()) - } - return resp, err - }) - } -} - -// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is -// among the set passed. Since these are artificial errors, the response body may still require -// closing. -func DoErrorIfStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response -// StatusCode is among the set passed. Since these are artificial errors, the response body -// may still require closing. -func DoErrorUnlessStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the -// passed status codes. 
It expects the http.Response to contain a Location header providing the -// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than -// the supplied duration. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by -// closing the optional channel on the http.Request. -func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - resp, err = s.Do(r) - - if err == nil && ResponseHasStatusCode(resp, codes...) { - r, err = NewPollingRequestWithContext(r.Context(), resp) - - for err == nil && ResponseHasStatusCode(resp, codes...) { - Respond(resp, - ByDiscardingBody(), - ByClosing()) - resp, err = SendWithSender(s, r, - AfterDelay(GetRetryAfter(resp, delay))) - } - } - - return resp, err - }) - } -} - -// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. -func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - for attempt := 0; attempt < attempts; attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - DrainResponseBody(resp) - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - logger.Instance.Writef(logger.LogError, "DoRetryForAttempts: received error for attempt %d: %v\n", attempt+1, err) - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// Count429AsRetry indicates that a 429 response should be included as a retry attempt. -var Count429AsRetry = true - -// Max429Delay is the maximum duration to wait between retries on a 429 if no Retry-After header was received. -var Max429Delay time.Duration - -// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. -// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts. -func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, 0, codes...) - }) - } -} - -// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the -// specified number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater -// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. 
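A hedged usage sketch for the retry decorators above, assuming the import path vendored here (github.com/Azure/go-autorest/autorest) and a caller-supplied request: SendWithSender wraps the client with DoRetryForStatusCodes so the listed transient 5xx responses are retried with exponential backoff.

package retryusage

import (
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

// sendWithRetry retries up to 3 times, backing off from 2s, whenever the
// service answers with one of the listed status codes.
func sendWithRetry(c *http.Client, req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(c, req,
		autorest.DoRetryForStatusCodes(3, 2*time.Second,
			http.StatusInternalServerError,
			http.StatusBadGateway,
			http.StatusServiceUnavailable,
		),
	)
}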
-func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, cap, codes...) - }) - } -} - -func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - // Increment to add the first call (attempts denotes number of retries) - for attempt, delayCount := 0, 0; attempt < attempts+1; { - err = rr.Prepare() - if err != nil { - return - } - DrainResponseBody(resp) - resp, err = s.Do(rr.Request()) - // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication - // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. - if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { - return resp, err - } - if err != nil { - logger.Instance.Writef(logger.LogError, "DoRetryForStatusCodes: received error for attempt %d: %v\n", attempt+1, err) - } - delayed := DelayWithRetryAfter(resp, r.Context().Done()) - // if this was a 429 set the delay cap as specified. - // applicable only in the absence of a retry-after header. - if resp != nil && resp.StatusCode == http.StatusTooManyRequests { - cap = Max429Delay - } - if !delayed && !DelayForBackoffWithCap(backoff, cap, delayCount, r.Context().Done()) { - return resp, r.Context().Err() - } - // when count429 == false don't count a 429 against the number - // of attempts so that we continue to retry until it succeeds - if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) { - attempt++ - } - // delay count is tracked separately from attempts to - // ensure that 429 participates in exponential back-off - delayCount++ - } - return resp, err -} - -// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header. -// The value of Retry-After can be either the number of seconds or a date in RFC1123 format. -// The function returns true after successfully waiting for the specified duration. If there is -// no Retry-After header or the wait is cancelled the return value is false. -func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { - if resp == nil { - return false - } - var dur time.Duration - ra := resp.Header.Get("Retry-After") - if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { - dur = time.Duration(retryAfter) * time.Second - } else if t, err := time.Parse(time.RFC1123, ra); err == nil { - dur = t.Sub(time.Now()) - } - if dur > 0 { - select { - case <-time.After(dur): - return true - case <-cancel: - return false - } - } - return false -} - -// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal -// to or greater than the specified duration, exponentially backing off between requests using the -// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the -// optional channel on the http.Request. 
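The arithmetic behind those retries lives in DelayForBackoffWithCap, defined further below: the wait is the backoff in seconds times 2^attempt, clamped to cap when cap is positive. A self-contained restatement with worked numbers:

package backoffsketch

import (
	"math"
	"time"
)

// backoffDelay mirrors the deleted DelayForBackoffWithCap computation
// (without the logging and cancellation plumbing).
func backoffDelay(backoff, cap time.Duration, attempt int) time.Duration {
	d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
	if cap > 0 && d > cap {
		d = cap
	}
	return d
}

// With backoff=1s and cap=5s, attempts 0..3 wait 1s, 2s, 4s, then 5s
// (the raw 8s is clamped by the cap).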
-func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - end := time.Now().Add(d) - for attempt := 0; time.Now().Before(end); attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - DrainResponseBody(resp) - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - logger.Instance.Writef(logger.LogError, "DoRetryForDuration: received error for attempt %d: %v\n", attempt+1, err) - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// WithLogging returns a SendDecorator that implements simple before and after logging of the -// request. -func WithLogging(logger *log.Logger) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - logger.Printf("Sending %s %s", r.Method, r.URL) - resp, err := s.Do(r) - if err != nil { - logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) - } else { - logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) - } - return resp, err - }) - } -} - -// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set -// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, -// returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. -func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { - return DelayForBackoffWithCap(backoff, 0, attempt, cancel) -} - -// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set -// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap. -// The delay may be canceled by closing the passed channel. If terminated early, returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. -func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { - d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second - if cap > 0 && d > cap { - d = cap - } - logger.Instance.Writef(logger.LogInfo, "DelayForBackoffWithCap: sleeping for %s\n", d) - select { - case <-time.After(d): - return true - case <-cancel: - return false - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go deleted file mode 100644 index d35b3850ab..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ /dev/null @@ -1,232 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "reflect" - "strings" -) - -// EncodedAs is a series of constants specifying various data encodings -type EncodedAs string - -const ( - // EncodedAsJSON states that data is encoded as JSON - EncodedAsJSON EncodedAs = "JSON" - - // EncodedAsXML states that data is encoded as Xml - EncodedAsXML EncodedAs = "XML" -) - -// Decoder defines the decoding method json.Decoder and xml.Decoder share -type Decoder interface { - Decode(v interface{}) error -} - -// NewDecoder creates a new decoder appropriate to the passed encoding. -// encodedAs specifies the type of encoding and r supplies the io.Reader containing the -// encoded data. -func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { - if encodedAs == EncodedAsJSON { - return json.NewDecoder(r) - } else if encodedAs == EncodedAsXML { - return xml.NewDecoder(r) - } - return nil -} - -// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy -// is especially useful if there is a chance the data will fail to decode. -// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v -// is the decoding destination. -func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (b bytes.Buffer, err error) { - err = NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) - return -} - -// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. -// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. -// Further, when it is closed, it ensures that rc is closed as well. -func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { - return &teeReadCloser{rc, io.TeeReader(rc, w)} -} - -type teeReadCloser struct { - rc io.ReadCloser - r io.Reader -} - -func (t *teeReadCloser) Read(p []byte) (int, error) { - return t.r.Read(p) -} - -func (t *teeReadCloser) Close() error { - return t.rc.Close() -} - -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -func escapeValueStrings(m map[string]string) map[string]string { - for key, value := range m { - m[key] = url.QueryEscape(value) - } - return m -} - -func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { - mapOfStrings := make(map[string]string) - for key, value := range mapOfInterface { - mapOfStrings[key] = ensureValueString(value) - } - return mapOfStrings -} - -func ensureValueString(value interface{}) string { - if value == nil { - return "" - } - switch v := value.(type) { - case string: - return v - case []byte: - return string(v) - default: - return fmt.Sprintf("%v", v) - } -} - -// MapToValues method converts map[string]interface{} to url.Values. 
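A short sketch of the CopyAndDecode helper above, assuming the vendored import path: the io.TeeReader inside it retains the raw bytes, so the payload is still inspectable if decoding fails.

package decodesketch

import (
	"bytes"
	"strings"

	"github.com/Azure/go-autorest/autorest"
)

// decodeKeepingRaw returns both the decoded JSON and a buffer holding the
// exact bytes that were read, which is useful for error reporting.
func decodeKeepingRaw(payload string) (bytes.Buffer, map[string]interface{}, error) {
	var v map[string]interface{}
	raw, err := autorest.CopyAndDecode(autorest.EncodedAsJSON, strings.NewReader(payload), &v)
	return raw, v, err
}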
-func MapToValues(m map[string]interface{}) url.Values { - v := url.Values{} - for key, value := range m { - x := reflect.ValueOf(value) - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - for i := 0; i < x.Len(); i++ { - v.Add(key, ensureValueString(x.Index(i))) - } - } else { - v.Add(key, ensureValueString(value)) - } - } - return v -} - -// AsStringSlice method converts interface{} to []string. -// s must be of type slice or array or an error is returned. -// Each element of s will be converted to its string representation. -func AsStringSlice(s interface{}) ([]string, error) { - v := reflect.ValueOf(s) - if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { - return nil, NewError("autorest", "AsStringSlice", "the value's type is not a slice or array.") - } - stringSlice := make([]string, 0, v.Len()) - - for i := 0; i < v.Len(); i++ { - stringSlice = append(stringSlice, fmt.Sprintf("%v", v.Index(i))) - } - return stringSlice, nil -} - -// String method converts interface v to string. If interface is a list, it -// joins list elements using the separator. Note that only sep[0] will be used for -// joining if any separator is specified. -func String(v interface{}, sep ...string) string { - if len(sep) == 0 { - return ensureValueString(v) - } - stringSlice, ok := v.([]string) - if ok == false { - var err error - stringSlice, err = AsStringSlice(v) - if err != nil { - panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) - } - } - return ensureValueString(strings.Join(stringSlice, sep[0])) -} - -// Encode method encodes url path and query parameters. -func Encode(location string, v interface{}, sep ...string) string { - s := String(v, sep...) - switch strings.ToLower(location) { - case "path": - return pathEscape(s) - case "query": - return queryEscape(s) - default: - return s - } -} - -func pathEscape(s string) string { - return strings.Replace(url.QueryEscape(s), "+", "%20", -1) -} - -func queryEscape(s string) string { - return url.QueryEscape(s) -} - -// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). -// This is mainly useful for long-running operations that use the Azure-AsyncOperation -// header, so we change the initial PUT into a GET to retrieve the final result. -func ChangeToGet(req *http.Request) *http.Request { - req.Method = "GET" - req.Body = nil - req.ContentLength = 0 - req.Header.Del("Content-Length") - return req -} - -// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false -// if it's not. If the error doesn't implement the net.Error interface the return value is true. -func IsTemporaryNetworkError(err error) bool { - if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { - return true - } - return false -} - -// DrainResponseBody reads the response body then closes it. 
-func DrainResponseBody(resp *http.Response) error { - if resp != nil && resp.Body != nil { - _, err := io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - return err - } - return nil -} - -func setHeader(r *http.Request, key, value string) { - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(key, value) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go deleted file mode 100644 index 3133fcc08e..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build go1.13 -// +build go1.13 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "errors" - - "github.com/Azure/go-autorest/autorest/adal" -) - -// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError interface. -func IsTokenRefreshError(err error) bool { - var tre adal.TokenRefreshError - return errors.As(err, &tre) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go deleted file mode 100644 index 851e152db4..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build !go1.13 -// +build !go1.13 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import "github.com/Azure/go-autorest/autorest/adal" - -// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError -// interface. If err is a DetailedError it will walk the chain of Original errors. 
-func IsTokenRefreshError(err error) bool { - if _, ok := err.(adal.TokenRefreshError); ok { - return true - } - if de, ok := err.(DetailedError); ok { - return IsTokenRefreshError(de.Original) - } - return false -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go deleted file mode 100644 index 713e23581d..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ /dev/null @@ -1,41 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "runtime" -) - -const number = "v14.2.1" - -var ( - userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - number, - ) -) - -// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. -func UserAgent() string { - return userAgent -} - -// Version returns the semantic version (see http://semver.org). -func Version() string { - return number -} diff --git a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml deleted file mode 100644 index 6fb8404fd0..0000000000 --- a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml +++ /dev/null @@ -1,105 +0,0 @@ -variables: - GOPATH: '$(system.defaultWorkingDirectory)/work' - sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)' - -jobs: - - job: 'goautorest' - displayName: 'Run go-autorest CI Checks' - - strategy: - matrix: - Linux_Go113: - vm.image: 'ubuntu-18.04' - go.version: '1.13' - Linux_Go114: - vm.image: 'ubuntu-18.04' - go.version: '1.14' - - pool: - vmImage: '$(vm.image)' - - steps: - - task: GoTool@0 - inputs: - version: '$(go.version)' - displayName: "Select Go Version" - - - script: | - set -e - mkdir -p '$(GOPATH)/bin' - mkdir -p '$(sdkPath)' - shopt -s extglob - mv !(work) '$(sdkPath)' - echo '##vso[task.prependpath]$(GOPATH)/bin' - displayName: 'Create Go Workspace' - - - script: | - set -e - curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh - dep ensure -v - go install ./vendor/golang.org/x/lint/golint - go get github.com/jstemmer/go-junit-report - go get github.com/axw/gocov/gocov - go get github.com/AlekSi/gocov-xml - go get -u github.com/matm/gocov-html - workingDirectory: '$(sdkPath)' - displayName: 'Install Dependencies' - - - script: | - go vet ./autorest/... - go vet ./logger/... - go vet ./tracing/... - workingDirectory: '$(sdkPath)' - displayName: 'Vet' - - - script: | - go build -v ./autorest/... - go build -v ./logger/... - go build -v ./tracing/... - workingDirectory: '$(sdkPath)' - displayName: 'Build' - - - script: | - set -e - go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 
2>&1 | go-junit-report > report.xml - gocov convert coverage.txt > coverage.json - gocov-xml < coverage.json > coverage.xml - gocov-html < coverage.json > coverage.html - workingDirectory: '$(sdkPath)' - displayName: 'Run Tests' - - - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2 - workingDirectory: '$(sdkPath)' - displayName: 'Copyright Header Check' - failOnStderr: true - condition: succeededOrFailed() - - - script: | - gofmt -s -l -w ./autorest/. >&2 - gofmt -s -l -w ./logger/. >&2 - gofmt -s -l -w ./tracing/. >&2 - workingDirectory: '$(sdkPath)' - displayName: 'Format Check' - failOnStderr: true - condition: succeededOrFailed() - - - script: | - golint ./autorest/... >&2 - golint ./logger/... >&2 - golint ./tracing/... >&2 - workingDirectory: '$(sdkPath)' - displayName: 'Linter Check' - failOnStderr: true - condition: succeededOrFailed() - - - task: PublishTestResults@2 - inputs: - testRunner: JUnit - testResultsFiles: $(sdkPath)/report.xml - failTaskOnFailedTests: true - - - task: PublishCodeCoverageResults@1 - inputs: - codeCoverageTool: Cobertura - summaryFileLocation: $(sdkPath)/coverage.xml - additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/vendor/github.com/Azure/go-autorest/doc.go b/vendor/github.com/Azure/go-autorest/doc.go deleted file mode 100644 index 99ae6ca988..0000000000 --- a/vendor/github.com/Azure/go-autorest/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages. -*/ -package go_autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/logger/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go deleted file mode 100644 index 0aa27680db..0000000000 --- a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package logger - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go deleted file mode 100644 index 2f5d8cc1a1..0000000000 --- a/vendor/github.com/Azure/go-autorest/logger/logger.go +++ /dev/null @@ -1,337 +0,0 @@ -package logger - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "strings" - "sync" - "time" -) - -// LevelType tells a logger the minimum level to log. When code reports a log entry, -// the LogLevel indicates the level of the log entry. The logger only records entries -// whose level is at least the level it was told to log. See the Log* constants. -// For example, if a logger is configured with LogError, then LogError, LogPanic, -// and LogFatal entries will be logged; lower level entries are ignored. -type LevelType uint32 - -const ( - // LogNone tells a logger not to log any entries passed to it. - LogNone LevelType = iota - - // LogFatal tells a logger to log all LogFatal entries passed to it. - LogFatal - - // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. - LogPanic - - // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. - LogError - - // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogWarning - - // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogInfo - - // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogDebug - - // LogAuth is a special case of LogDebug, it tells a logger to also log the body of an authentication request and response. - // NOTE: this can disclose sensitive information, use with care. - LogAuth -) - -const ( - logNone = "NONE" - logFatal = "FATAL" - logPanic = "PANIC" - logError = "ERROR" - logWarning = "WARNING" - logInfo = "INFO" - logDebug = "DEBUG" - logAuth = "AUTH" - logUnknown = "UNKNOWN" -) - -// ParseLevel converts the specified string into the corresponding LevelType. -func ParseLevel(s string) (lt LevelType, err error) { - switch strings.ToUpper(s) { - case logFatal: - lt = LogFatal - case logPanic: - lt = LogPanic - case logError: - lt = LogError - case logWarning: - lt = LogWarning - case logInfo: - lt = LogInfo - case logDebug: - lt = LogDebug - case logAuth: - lt = LogAuth - default: - err = fmt.Errorf("bad log level '%s'", s) - } - return -} - -// String implements the stringer interface for LevelType. 
-func (lt LevelType) String() string { - switch lt { - case LogNone: - return logNone - case LogFatal: - return logFatal - case LogPanic: - return logPanic - case LogError: - return logError - case LogWarning: - return logWarning - case LogInfo: - return logInfo - case LogDebug: - return logDebug - case LogAuth: - return logAuth - default: - return logUnknown - } -} - -// Filter defines functions for filtering HTTP request/response content. -type Filter struct { - // URL returns a potentially modified string representation of a request URL. - URL func(u *url.URL) string - - // Header returns a potentially modified set of values for the specified key. - // To completely exclude the header key/values return false. - Header func(key string, val []string) (bool, []string) - - // Body returns a potentially modified request/response body. - Body func(b []byte) []byte -} - -func (f Filter) processURL(u *url.URL) string { - if f.URL == nil { - return u.String() - } - return f.URL(u) -} - -func (f Filter) processHeader(k string, val []string) (bool, []string) { - if f.Header == nil { - return true, val - } - return f.Header(k, val) -} - -func (f Filter) processBody(b []byte) []byte { - if f.Body == nil { - return b - } - return f.Body(b) -} - -// Writer defines methods for writing to a logging facility. -type Writer interface { - // Writeln writes the specified message with the standard log entry header and new-line character. - Writeln(level LevelType, message string) - - // Writef writes the specified format specifier with the standard log entry header and no new-line character. - Writef(level LevelType, format string, a ...interface{}) - - // WriteRequest writes the specified HTTP request to the logger if the log level is greater than - // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. - // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no request content is excluded. - WriteRequest(req *http.Request, filter Filter) - - // WriteResponse writes the specified HTTP response to the logger if the log level is greater than - // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. - // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no response content is excluded. - WriteResponse(resp *http.Response, filter Filter) -} - -// Instance is the default log writer initialized during package init. -// This can be replaced with a custom implementation as required. -var Instance Writer - -// default log level -var logLevel = LogNone - -// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. -// If no value was specified the default value is LogNone. -// Custom loggers can call this to retrieve the configured log level. 
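Two notes on this logger package: although the doc comment above names AZURE_GO_AUTOREST_LOG_LEVEL, the initDefaultLogger code below actually reads AZURE_GO_SDK_LOG_LEVEL (and AZURE_GO_SDK_LOG_FILE for the destination); and Instance accepts any Writer implementation. A minimal custom Writer sketch, assuming the vendored import path:

package logsketch

import (
	"fmt"
	"net/http"
	"os"

	"github.com/Azure/go-autorest/logger"
)

// stderrWriter forwards plain log lines to stderr and ignores the
// request/response dumps.
type stderrWriter struct{}

func (stderrWriter) Writeln(level logger.LevelType, msg string) {
	fmt.Fprintf(os.Stderr, "%s: %s\n", level, msg)
}

func (stderrWriter) Writef(level logger.LevelType, format string, a ...interface{}) {
	fmt.Fprintf(os.Stderr, "%s: %s", level, fmt.Sprintf(format, a...))
}

func (stderrWriter) WriteRequest(*http.Request, logger.Filter) {}

func (stderrWriter) WriteResponse(*http.Response, logger.Filter) {}

func init() {
	logger.Instance = stderrWriter{} // replace the package default
}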
-func Level() LevelType { - return logLevel -} - -func init() { - // separated for testing purposes - initDefaultLogger() -} - -func initDefaultLogger() { - // init with nilLogger so callers don't have to do a nil check on Default - Instance = nilLogger{} - llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) - if llStr == "" { - return - } - var err error - logLevel, err = ParseLevel(llStr) - if err != nil { - fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) - return - } - if logLevel == LogNone { - return - } - // default to stderr - dest := os.Stderr - lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") - if strings.EqualFold(lfStr, "stdout") { - dest = os.Stdout - } else if lfStr != "" { - lf, err := os.Create(lfStr) - if err == nil { - dest = lf - } else { - fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) - } - } - Instance = fileLogger{ - logLevel: logLevel, - mu: &sync.Mutex{}, - logFile: dest, - } -} - -// the nil logger does nothing -type nilLogger struct{} - -func (nilLogger) Writeln(LevelType, string) {} - -func (nilLogger) Writef(LevelType, string, ...interface{}) {} - -func (nilLogger) WriteRequest(*http.Request, Filter) {} - -func (nilLogger) WriteResponse(*http.Response, Filter) {} - -// A File is used instead of a Logger so the stream can be flushed after every write. -type fileLogger struct { - logLevel LevelType - mu *sync.Mutex // for synchronizing writes to logFile - logFile *os.File -} - -func (fl fileLogger) Writeln(level LevelType, message string) { - fl.Writef(level, "%s\n", message) -} - -func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { - if fl.logLevel >= level { - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) - fl.logFile.Sync() - } -} - -func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { - if req == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) - // dump headers - for k, v := range req.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(req.Header, req.Body) { - // dump body - body, err := ioutil.ReadAll(req.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - if nc, ok := req.Body.(io.Seeker); ok { - // rewind to the beginning - nc.Seek(0, io.SeekStart) - } else { - // recreate the body - req.Body = ioutil.NopCloser(bytes.NewReader(body)) - } - } else { - fmt.Fprintf(b, "failed to read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { - if resp == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) - // dump headers - for k, v := range resp.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(resp.Header, resp.Body) { - // dump body - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) - } else { - fmt.Fprintf(b, "failed to 
read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -// returns true if the provided body should be included in the log -func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { - ct := header.Get("Content-Type") - return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") -} - -// creates standard header for log entries, it contains a timestamp and the log level -func entryHeader(level LevelType) string { - // this format provides a fixed number of digits so the size of the timestamp is constant - return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) -} diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/tracing/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go deleted file mode 100644 index e163975cd4..0000000000 --- a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package tracing - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. 
-// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go deleted file mode 100644 index 0e7a6e9625..0000000000 --- a/vendor/github.com/Azure/go-autorest/tracing/tracing.go +++ /dev/null @@ -1,67 +0,0 @@ -package tracing - -// Copyright 2018 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "net/http" -) - -// Tracer represents an HTTP tracing facility. -type Tracer interface { - NewTransport(base *http.Transport) http.RoundTripper - StartSpan(ctx context.Context, name string) context.Context - EndSpan(ctx context.Context, httpStatusCode int, err error) -} - -var ( - tracer Tracer -) - -// Register will register the provided Tracer. Pass nil to unregister a Tracer. -func Register(t Tracer) { - tracer = t -} - -// IsEnabled returns true if a Tracer has been registered. -func IsEnabled() bool { - return tracer != nil -} - -// NewTransport creates a new instrumenting http.RoundTripper for the -// registered Tracer. If no Tracer has been registered it returns nil. -func NewTransport(base *http.Transport) http.RoundTripper { - if tracer != nil { - return tracer.NewTransport(base) - } - return nil -} - -// StartSpan starts a trace span with the specified name, associating it with the -// provided context. Has no effect if a Tracer has not been registered. -func StartSpan(ctx context.Context, name string) context.Context { - if tracer != nil { - return tracer.StartSpan(ctx, name) - } - return ctx -} - -// EndSpan ends a previously started span stored in the context. -// Has no effect if a Tracer has not been registered. 
-func EndSpan(ctx context.Context, httpStatusCode int, err error) { - if tracer != nil { - tracer.EndSpan(ctx, httpStatusCode, err) - } -} diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore deleted file mode 100644 index fe79e3adda..0000000000 --- a/vendor/github.com/BurntSushi/toml/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/toml.test -/toml-test diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING deleted file mode 100644 index 01b5743200..0000000000 --- a/vendor/github.com/BurntSushi/toml/COPYING +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 TOML authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md deleted file mode 100644 index 3651cfa960..0000000000 --- a/vendor/github.com/BurntSushi/toml/README.md +++ /dev/null @@ -1,120 +0,0 @@ -TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` packages. - -Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). - -Documentation: https://godocs.io/github.com/BurntSushi/toml - -See the [releases page](https://github.com/BurntSushi/toml/releases) for a -changelog; this information is also in the git tag annotations (e.g. `git show -v0.4.0`). 
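The hunks above delete the vendored `go-autorest/tracing` shim in its entirety, and the whole API is visible in the removed file: a three-method `Tracer` interface, a package-level `Register`, and helpers that degrade to no-ops while nothing is registered. For readers who haven't met the package, a minimal sketch of that contract; the `noopTracer` type is hypothetical, and the canonical upstream import path is assumed rather than this repo's vendored mirror path:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/tracing"
)

// noopTracer satisfies tracing.Tracer without recording anything.
type noopTracer struct{}

func (noopTracer) NewTransport(base *http.Transport) http.RoundTripper   { return base }
func (noopTracer) StartSpan(ctx context.Context, name string) context.Context { return ctx }
func (noopTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {}

func main() {
	fmt.Println(tracing.IsEnabled()) // false: nothing registered yet

	tracing.Register(noopTracer{})
	fmt.Println(tracing.IsEnabled()) // true

	ctx := tracing.StartSpan(context.Background(), "example")
	tracing.EndSpan(ctx, http.StatusOK, nil)
}
```

Because `tracer` is a plain package variable in the deleted source, the last `Register` call wins and no synchronization is attempted; callers are expected to register once at startup.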
- -This library requires Go 1.13 or newer; add it to your go.mod with: - - % go get github.com/BurntSushi/toml@latest - -It also comes with a TOML validator CLI tool: - - % go install github.com/BurntSushi/toml/cmd/tomlv@latest - % tomlv some-toml-file.toml - -### Examples -For the simplest example, consider some TOML file as just a list of keys and -values: - -```toml -Age = 25 -Cats = [ "Cauchy", "Plato" ] -Pi = 3.14 -Perfection = [ 6, 28, 496, 8128 ] -DOB = 1987-07-05T05:45:00Z -``` - -Which can be decoded with: - -```go -type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time -} - -var conf Config -_, err := toml.Decode(tomlData, &conf) -``` - -You can also use struct tags if your struct field name doesn't map to a TOML key -value directly: - -```toml -some_key_NAME = "wat" -``` - -```go -type TOML struct { - ObscureKey string `toml:"some_key_NAME"` -} -``` - -Beware that like other decoders **only exported fields** are considered when -encoding and decoding; private fields are silently ignored. - -### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces -Here's an example that automatically parses values in a `mail.Address`: - -```toml -contacts = [ - "Donald Duck ", - "Scrooge McDuck ", -] -``` - -Can be decoded with: - -```go -// Create address type which satisfies the encoding.TextUnmarshaler interface. -type address struct { - *mail.Address -} - -func (a *address) UnmarshalText(text []byte) error { - var err error - a.Address, err = mail.ParseAddress(string(text)) - return err -} - -// Decode it. -func decode() { - blob := ` - contacts = [ - "Donald Duck ", - "Scrooge McDuck ", - ] - ` - - var contacts struct { - Contacts []address - } - - _, err := toml.Decode(blob, &contacts) - if err != nil { - log.Fatal(err) - } - - for _, c := range contacts.Contacts { - fmt.Printf("%#v\n", c.Address) - } - - // Output: - // &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"} - // &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"} -} -``` - -To target TOML specifically you can implement `UnmarshalTOML` TOML interface in -a similar way. - -### More complex usage -See the [`_example/`](/_example) directory for a more complex example. diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go deleted file mode 100644 index 0ca1dc4fee..0000000000 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ /dev/null @@ -1,602 +0,0 @@ -package toml - -import ( - "bytes" - "encoding" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "math" - "os" - "reflect" - "strconv" - "strings" - "time" -) - -// Unmarshaler is the interface implemented by objects that can unmarshal a -// TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -// Unmarshal decodes the contents of data in TOML format into a pointer v. -// -// See [Decoder] for a description of the decoding process. -func Unmarshal(data []byte, v interface{}) error { - _, err := NewDecoder(bytes.NewReader(data)).Decode(v) - return err -} - -// Decode the TOML data in to the pointer v. -// -// See [Decoder] for a description of the decoding process. -func Decode(data string, v interface{}) (MetaData, error) { - return NewDecoder(strings.NewReader(data)).Decode(v) -} - -// DecodeFile reads the contents of a file and decodes it with [Decode]. 
-func DecodeFile(path string, v interface{}) (MetaData, error) { - fp, err := os.Open(path) - if err != nil { - return MetaData{}, err - } - defer fp.Close() - return NewDecoder(fp).Decode(v) -} - -// Primitive is a TOML value that hasn't been decoded into a Go value. -// -// This type can be used for any value, which will cause decoding to be delayed. -// You can use [PrimitiveDecode] to "manually" decode these values. -// -// NOTE: The underlying representation of a `Primitive` value is subject to -// change. Do not rely on it. -// -// NOTE: Primitive values are still parsed, so using them will only avoid the -// overhead of reflection. They can be useful when you don't know the exact type -// of TOML data until runtime. -type Primitive struct { - undecoded interface{} - context Key -} - -// The significand precision for float32 and float64 is 24 and 53 bits; this is -// the range a natural number can be stored in a float without loss of data. -const ( - maxSafeFloat32Int = 16777215 // 2^24-1 - maxSafeFloat64Int = int64(9007199254740991) // 2^53-1 -) - -// Decoder decodes TOML data. -// -// TOML tables correspond to Go structs or maps; they can be used -// interchangeably, but structs offer better type safety. -// -// TOML table arrays correspond to either a slice of structs or a slice of maps. -// -// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the -// local timezone. -// -// [time.Duration] types are treated as nanoseconds if the TOML value is an -// integer, or they're parsed with time.ParseDuration() if they're strings. -// -// All other TOML types (float, string, int, bool and array) correspond to the -// obvious Go types. -// -// An exception to the above rules is if a type implements the TextUnmarshaler -// interface, in which case any primitive TOML value (floats, strings, integers, -// booleans, datetimes) will be converted to a []byte and given to the value's -// UnmarshalText method. See the Unmarshaler example for a demonstration with -// email addresses. -// -// ### Key mapping -// -// TOML keys can map to either keys in a Go map or field names in a Go struct. -// The special `toml` struct tag can be used to map TOML keys to struct fields -// that don't match the key name exactly (see the example). A case insensitive -// match to struct names will be tried if an exact match can't be found. -// -// The mapping between TOML values and Go values is loose. That is, there may -// exist TOML values that cannot be placed into your representation, and there -// may be parts of your representation that do not correspond to TOML values. -// This loose mapping can be made stricter by using the IsDefined and/or -// Undecoded methods on the MetaData returned. -// -// This decoder does not handle cyclic types. Decode will not terminate if a -// cyclic type is passed. -type Decoder struct { - r io.Reader -} - -// NewDecoder creates a new Decoder. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{r: r} -} - -var ( - unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() - unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem() -) - -// Decode TOML data in to the pointer `v`. 
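The `Decoder` comment just removed spells out the key-mapping rules: a `toml` struct tag wins, and otherwise an exact field-name match is tried before a case-insensitive one. A short sketch of both paths, with a hypothetical `serverConfig` type and the canonical upstream import path:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type serverConfig struct {
	ListenAddr string `toml:"listen_addr"` // tag maps a key that doesn't resemble the field name
	Replicas   int    // no tag: matched case-insensitively against the key "replicas"
}

func main() {
	const blob = `
listen_addr = "0.0.0.0:8080"
replicas = 3
`
	var cfg serverConfig
	if _, err := toml.Decode(blob, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.ListenAddr, cfg.Replicas) // 0.0.0.0:8080 3
}
```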
-func (dec *Decoder) Decode(v interface{}) (MetaData, error) { - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr { - s := "%q" - if reflect.TypeOf(v) == nil { - s = "%v" - } - - return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) - } - if rv.IsNil() { - return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) - } - - // Check if this is a supported type: struct, map, interface{}, or something - // that implements UnmarshalTOML or UnmarshalText. - rv = indirect(rv) - rt := rv.Type() - if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && - !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && - !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { - return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) - } - - // TODO: parser should read from io.Reader? Or at the very least, make it - // read from []byte rather than string - data, err := ioutil.ReadAll(dec.r) - if err != nil { - return MetaData{}, err - } - - p, err := parse(string(data)) - if err != nil { - return MetaData{}, err - } - - md := MetaData{ - mapping: p.mapping, - keyInfo: p.keyInfo, - keys: p.ordered, - decoded: make(map[string]struct{}, len(p.ordered)), - context: nil, - data: data, - } - return md, md.unify(p.mapping, rv) -} - -// PrimitiveDecode is just like the other Decode* functions, except it decodes a -// TOML value that has already been parsed. Valid primitive values can *only* be -// obtained from values filled by the decoder functions, including this method. -// (i.e., v may contain more [Primitive] values.) -// -// Meta data for primitive values is included in the meta data returned by the -// Decode* functions with one exception: keys returned by the Undecoded method -// will only reflect keys that were decoded. Namely, any keys hidden behind a -// Primitive will be considered undecoded. Executing this method will update the -// undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - -// unify performs a sort of type unification based on the structure of `rv`, -// which is the client representation. -// -// Any type mismatch produces an error. Finding a type that we don't know -// how to handle produces an unsupported type error. -func (md *MetaData) unify(data interface{}, rv reflect.Value) error { - // Special case. Look for a `Primitive` value. - // TODO: #76 would make this superfluous after implemented. - if rv.Type() == primitiveType { - // Save the undecoded data and the key context into the primitive - // value. - context := make(Key, len(md.context)) - copy(context, md.context) - rv.Set(reflect.ValueOf(Primitive{ - undecoded: data, - context: context, - })) - return nil - } - - rvi := rv.Interface() - if v, ok := rvi.(Unmarshaler); ok { - return v.UnmarshalTOML(data) - } - if v, ok := rvi.(encoding.TextUnmarshaler); ok { - return md.unifyText(data, v) - } - - // TODO: - // The behavior here is incorrect whenever a Go type satisfies the - // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or - // array. In particular, the unmarshaler should only be applied to primitive - // TOML values. But at this point, it will be applied to all kinds of values - // and produce an incorrect error whenever those values are hashes or arrays - // (including arrays of tables). 
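`PrimitiveDecode`, whose doc comment appears just above, is the second half of the delayed-decoding story: a `Primitive` field parks a parsed-but-unmapped value, and `MetaData.PrimitiveDecode` finishes the job once the target type is known. A sketch under those rules (the backend shape is invented for illustration):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type gcsBackend struct {
	Kind   string
	Bucket string
}

func main() {
	var doc struct {
		Backend toml.Primitive `toml:"backend"`
	}
	md, err := toml.Decode(`backend = { kind = "gcs", bucket = "artifacts" }`, &doc)
	if err != nil {
		log.Fatal(err)
	}

	// The value is already parsed but not yet mapped to a Go type;
	// pick the concrete type now and finish decoding.
	var b gcsBackend
	if err := md.PrimitiveDecode(doc.Backend, &b); err != nil {
		log.Fatal(err)
	}
	fmt.Println(b.Kind, b.Bucket) // gcs artifacts
}
```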
- - k := rv.Kind() - - if k >= reflect.Int && k <= reflect.Uint64 { - return md.unifyInt(data, rv) - } - switch k { - case reflect.Ptr: - elem := reflect.New(rv.Type().Elem()) - err := md.unify(data, reflect.Indirect(elem)) - if err != nil { - return err - } - rv.Set(elem) - return nil - case reflect.Struct: - return md.unifyStruct(data, rv) - case reflect.Map: - return md.unifyMap(data, rv) - case reflect.Array: - return md.unifyArray(data, rv) - case reflect.Slice: - return md.unifySlice(data, rv) - case reflect.String: - return md.unifyString(data, rv) - case reflect.Bool: - return md.unifyBool(data, rv) - case reflect.Interface: - if rv.NumMethod() > 0 { // Only support empty interfaces are supported. - return md.e("unsupported type %s", rv.Type()) - } - return md.unifyAnything(data, rv) - case reflect.Float32, reflect.Float64: - return md.unifyFloat64(data, rv) - } - return md.e("unsupported type %s", rv.Kind()) -} - -func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - if mapping == nil { - return nil - } - return md.e("type mismatch for %s: expected table but found %T", - rv.Type().String(), mapping) - } - - for key, datum := range tmap { - var f *field - fields := cachedTypeFields(rv.Type()) - for i := range fields { - ff := &fields[i] - if ff.name == key { - f = ff - break - } - if f == nil && strings.EqualFold(ff.name, key) { - f = ff - } - } - if f != nil { - subv := rv - for _, i := range f.index { - subv = indirect(subv.Field(i)) - } - - if isUnifiable(subv) { - md.decoded[md.context.add(key).String()] = struct{}{} - md.context = append(md.context, key) - - err := md.unify(datum, subv) - if err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - } else if f.name != "" { - return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) - } - } - } - return nil -} - -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - keyType := rv.Type().Key().Kind() - if keyType != reflect.String && keyType != reflect.Interface { - return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", - keyType, rv.Type()) - } - - tmap, ok := mapping.(map[string]interface{}) - if !ok { - if tmap == nil { - return nil - } - return md.badtype("map", mapping) - } - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - for k, v := range tmap { - md.decoded[md.context.add(k).String()] = struct{}{} - md.context = append(md.context, k) - - rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - - err := md.unify(v, indirect(rvval)) - if err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - - rvkey := indirect(reflect.New(rv.Type().Key())) - - switch keyType { - case reflect.Interface: - rvkey.Set(reflect.ValueOf(k)) - case reflect.String: - rvkey.SetString(k) - } - - rv.SetMapIndex(rvkey, rvval) - } - return nil -} - -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - if !datav.IsValid() { - return nil - } - return md.badtype("slice", data) - } - if l := datav.Len(); l != rv.Len() { - return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - if !datav.IsValid() { - return nil - } - return 
md.badtype("slice", data) - } - n := datav.Len() - if rv.IsNil() || rv.Cap() < n { - rv.Set(reflect.MakeSlice(rv.Type(), n, n)) - } - rv.SetLen(n) - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - l := data.Len() - for i := 0; i < l; i++ { - err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) - if err != nil { - return err - } - } - return nil -} - -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { - _, ok := rv.Interface().(json.Number) - if ok { - if i, ok := data.(int64); ok { - rv.SetString(strconv.FormatInt(i, 10)) - } else if f, ok := data.(float64); ok { - rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) - } else { - return md.badtype("string", data) - } - return nil - } - - if s, ok := data.(string); ok { - rv.SetString(s) - return nil - } - return md.badtype("string", data) -} - -func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { - rvk := rv.Kind() - - if num, ok := data.(float64); ok { - switch rvk { - case reflect.Float32: - if num < -math.MaxFloat32 || num > math.MaxFloat32 { - return md.parseErr(errParseRange{i: num, size: rvk.String()}) - } - fallthrough - case reflect.Float64: - rv.SetFloat(num) - default: - panic("bug") - } - return nil - } - - if num, ok := data.(int64); ok { - if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || - (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { - return md.parseErr(errParseRange{i: num, size: rvk.String()}) - } - rv.SetFloat(float64(num)) - return nil - } - - return md.badtype("float", data) -} - -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - _, ok := rv.Interface().(time.Duration) - if ok { - // Parse as string duration, and fall back to regular integer parsing - // (as nanosecond) if this is not a string. 
- if s, ok := data.(string); ok { - dur, err := time.ParseDuration(s) - if err != nil { - return md.parseErr(errParseDuration{s}) - } - rv.SetInt(int64(dur)) - return nil - } - } - - num, ok := data.(int64) - if !ok { - return md.badtype("integer", data) - } - - rvk := rv.Kind() - switch { - case rvk >= reflect.Int && rvk <= reflect.Int64: - if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || - (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || - (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { - return md.parseErr(errParseRange{i: num, size: rvk.String()}) - } - rv.SetInt(num) - case rvk >= reflect.Uint && rvk <= reflect.Uint64: - unum := uint64(num) - if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || - rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || - rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { - return md.parseErr(errParseRange{i: num, size: rvk.String()}) - } - rv.SetUint(unum) - default: - panic("unreachable") - } - return nil -} - -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { - if b, ok := data.(bool); ok { - rv.SetBool(b) - return nil - } - return md.badtype("boolean", data) -} - -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { - rv.Set(reflect.ValueOf(data)) - return nil -} - -func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { - var s string - switch sdata := data.(type) { - case Marshaler: - text, err := sdata.MarshalTOML() - if err != nil { - return err - } - s = string(text) - case encoding.TextMarshaler: - text, err := sdata.MarshalText() - if err != nil { - return err - } - s = string(text) - case fmt.Stringer: - s = sdata.String() - case string: - s = sdata - case bool: - s = fmt.Sprintf("%v", sdata) - case int64: - s = fmt.Sprintf("%d", sdata) - case float64: - s = fmt.Sprintf("%f", sdata) - default: - return md.badtype("primitive (string-like)", data) - } - if err := v.UnmarshalText([]byte(s)); err != nil { - return err - } - return nil -} - -func (md *MetaData) badtype(dst string, data interface{}) error { - return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst) -} - -func (md *MetaData) parseErr(err error) error { - k := md.context.String() - return ParseError{ - LastKey: k, - Position: md.keyInfo[k].pos, - Line: md.keyInfo[k].pos.Line, - err: err, - input: string(md.data), - } -} - -func (md *MetaData) e(format string, args ...interface{}) error { - f := "toml: " - if len(md.context) > 0 { - f = fmt.Sprintf("toml: (last key %q): ", md.context) - p := md.keyInfo[md.context.String()].pos - if p.Line > 0 { - f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) - } - } - return fmt.Errorf(f+format, args...) -} - -// rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { - return indirect(reflect.ValueOf(v)) -} - -// indirect returns the value pointed to by a pointer. -// -// Pointers are followed until the value is not a pointer. New values are -// allocated for each nil pointer. -// -// An exception to this rule is if the value satisfies an interface of interest -// to us (like encoding.TextUnmarshaler). 
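`unifyInt` opens with the `time.Duration` handling that the `Decoder` comment promised: a TOML string goes through `time.ParseDuration`, while a TOML integer is taken as nanoseconds. Both forms side by side, as a sketch:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		FromString time.Duration `toml:"from_string"` // parsed with time.ParseDuration
		FromInt    time.Duration `toml:"from_int"`    // integer is taken as nanoseconds
	}
	const blob = `
from_string = "5m10s"
from_int = 1000000000
`
	if _, err := toml.Decode(blob, &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.FromString, v.FromInt) // 5m10s 1s
}
```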
-func indirect(v reflect.Value) reflect.Value { - if v.Kind() != reflect.Ptr { - if v.CanSet() { - pv := v.Addr() - pvi := pv.Interface() - if _, ok := pvi.(encoding.TextUnmarshaler); ok { - return pv - } - if _, ok := pvi.(Unmarshaler); ok { - return pv - } - } - return v - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - return indirect(reflect.Indirect(v)) -} - -func isUnifiable(rv reflect.Value) bool { - if rv.CanSet() { - return true - } - rvi := rv.Interface() - if _, ok := rvi.(encoding.TextUnmarshaler); ok { - return true - } - if _, ok := rvi.(Unmarshaler); ok { - return true - } - return false -} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go deleted file mode 100644 index 086d0b6866..0000000000 --- a/vendor/github.com/BurntSushi/toml/decode_go116.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package toml - -import ( - "io/fs" -) - -// DecodeFS reads the contents of a file from [fs.FS] and decodes it with -// [Decode]. -func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { - fp, err := fsys.Open(path) - if err != nil { - return MetaData{}, err - } - defer fp.Close() - return NewDecoder(fp).Decode(v) -} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go deleted file mode 100644 index c6af3f239d..0000000000 --- a/vendor/github.com/BurntSushi/toml/deprecated.go +++ /dev/null @@ -1,21 +0,0 @@ -package toml - -import ( - "encoding" - "io" -) - -// Deprecated: use encoding.TextMarshaler -type TextMarshaler encoding.TextMarshaler - -// Deprecated: use encoding.TextUnmarshaler -type TextUnmarshaler encoding.TextUnmarshaler - -// Deprecated: use MetaData.PrimitiveDecode. -func PrimitiveDecode(primValue Primitive, v interface{}) error { - md := MetaData{decoded: make(map[string]struct{})} - return md.unify(primValue.undecoded, rvalue(v)) -} - -// Deprecated: use NewDecoder(reader).Decode(&value). -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go deleted file mode 100644 index 81a7c0fe9f..0000000000 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package toml implements decoding and encoding of TOML files. -// -// This package supports TOML v1.0.0, as specified at https://toml.io -// -// There is also support for delaying decoding with the Primitive type, and -// querying the set of keys in a TOML document with the MetaData type. -// -// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, -// and can be used to verify if TOML document is valid. It can also be used to -// print the type of each key. 
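The removed `decode_go116.go` a little above adds `DecodeFS`, the `fs.FS` twin of `DecodeFile`. It pairs naturally with `embed.FS`; a sketch that assumes a `config.toml` sitting next to the source file:

```go
package main

import (
	"embed"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

//go:embed config.toml
var configFS embed.FS // embed.FS implements fs.FS, so it slots straight into DecodeFS

type config struct {
	Name string
	Port int
}

func main() {
	var c config
	if _, err := toml.DecodeFS(configFS, "config.toml", &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Name, c.Port)
}
```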
-package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go deleted file mode 100644 index 930e1d521a..0000000000 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ /dev/null @@ -1,750 +0,0 @@ -package toml - -import ( - "bufio" - "encoding" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/BurntSushi/toml/internal" -) - -type tomlEncodeError struct{ error } - -var ( - errArrayNilElement = errors.New("toml: cannot encode array with nil element") - errNonString = errors.New("toml: cannot encode a map with non-string key type") - errNoKey = errors.New("toml: top-level values must be Go maps or structs") - errAnything = errors.New("") // used in testing -) - -var dblQuotedReplacer = strings.NewReplacer( - "\"", "\\\"", - "\\", "\\\\", - "\x00", `\u0000`, - "\x01", `\u0001`, - "\x02", `\u0002`, - "\x03", `\u0003`, - "\x04", `\u0004`, - "\x05", `\u0005`, - "\x06", `\u0006`, - "\x07", `\u0007`, - "\b", `\b`, - "\t", `\t`, - "\n", `\n`, - "\x0b", `\u000b`, - "\f", `\f`, - "\r", `\r`, - "\x0e", `\u000e`, - "\x0f", `\u000f`, - "\x10", `\u0010`, - "\x11", `\u0011`, - "\x12", `\u0012`, - "\x13", `\u0013`, - "\x14", `\u0014`, - "\x15", `\u0015`, - "\x16", `\u0016`, - "\x17", `\u0017`, - "\x18", `\u0018`, - "\x19", `\u0019`, - "\x1a", `\u001a`, - "\x1b", `\u001b`, - "\x1c", `\u001c`, - "\x1d", `\u001d`, - "\x1e", `\u001e`, - "\x1f", `\u001f`, - "\x7f", `\u007f`, -) - -var ( - marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem() - marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - timeType = reflect.TypeOf((*time.Time)(nil)).Elem() -) - -// Marshaler is the interface implemented by types that can marshal themselves -// into valid TOML. -type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -// Encoder encodes a Go to a TOML document. -// -// The mapping between Go values and TOML values should be precisely the same as -// for [Decode]. -// -// time.Time is encoded as a RFC 3339 string, and time.Duration as its string -// representation. -// -// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to -// encoding the value as custom TOML. -// -// If you want to write arbitrary binary data then you will need to use -// something like base64 since TOML does not have any binary types. -// -// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes -// are encoded first. -// -// Go maps will be sorted alphabetically by key for deterministic output. -// -// The toml struct tag can be used to provide the key name; if omitted the -// struct field name will be used. If the "omitempty" option is present the -// following value will be skipped: -// -// - arrays, slices, maps, and string with len of 0 -// - struct with all zero values -// - bool false -// -// If omitzero is given all int and float types with a value of 0 will be -// skipped. -// -// Encoding Go values without a corresponding TOML representation will return an -// error. Examples of this includes maps with non-string keys, slices with nil -// elements, embedded non-struct types, and nested slices containing maps or -// structs. (e.g. [][]map[string]string is not allowed but []map[string]string -// is okay, as is []map[string][]string). -// -// NOTE: only exported keys are encoded due to the use of reflection. Unexported -// keys are silently discarded. 
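The `Encoder` documentation above spells out the two skip options carried on the `toml` tag: `omitempty` for zero-length collections and strings (plus false bools and all-zero structs), and `omitzero` for numeric zero. A sketch of both, with a hypothetical `release` type:

```go
package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml"
)

type release struct {
	Name    string   `toml:"name"`
	Notes   string   `toml:"notes,omitempty"`  // dropped when the string is empty
	Retries int      `toml:"retries,omitzero"` // dropped when the value is 0
	Tags    []string `toml:"tags,omitempty"`   // dropped when the slice is empty
}

func main() {
	r := release{Name: "v1.2.3"}
	if err := toml.NewEncoder(os.Stdout).Encode(r); err != nil {
		log.Fatal(err)
	}
	// Only `name = "v1.2.3"` is written; the empty and zero fields are skipped.
}
```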
-type Encoder struct { - // String to use for a single indentation level; default is two spaces. - Indent string - - w *bufio.Writer - hasWritten bool // written any output to w yet? -} - -// NewEncoder create a new Encoder. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } -} - -// Encode writes a TOML representation of the Go value to the [Encoder]'s writer. -// -// An error is returned if the value given cannot be encoded to a valid TOML -// document. -func (enc *Encoder) Encode(v interface{}) error { - rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { - return err - } - return enc.w.Flush() -} - -func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { - defer func() { - if r := recover(); r != nil { - if terr, ok := r.(tomlEncodeError); ok { - err = terr.error - return - } - panic(r) - } - }() - enc.encode(key, rv) - return nil -} - -func (enc *Encoder) encode(key Key, rv reflect.Value) { - // If we can marshal the type to text, then we use that. This prevents the - // encoder for handling these types as generic structs (or whatever the - // underlying type of a TextMarshaler is). - switch { - case isMarshaler(rv): - enc.writeKeyValue(key, rv, false) - return - case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented. - enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded)) - return - } - - k := rv.Kind() - switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, - reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: - enc.writeKeyValue(key, rv, false) - case reflect.Array, reflect.Slice: - if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { - enc.eArrayOfTables(key, rv) - } else { - enc.writeKeyValue(key, rv, false) - } - case reflect.Interface: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Map: - if rv.IsNil() { - return - } - enc.eTable(key, rv) - case reflect.Ptr: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Struct: - enc.eTable(key, rv) - default: - encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k)) - } -} - -// eElement encodes any value that can be an array element. -func (enc *Encoder) eElement(rv reflect.Value) { - switch v := rv.Interface().(type) { - case time.Time: // Using TextMarshaler adds extra quotes, which we don't want. - format := time.RFC3339Nano - switch v.Location() { - case internal.LocalDatetime: - format = "2006-01-02T15:04:05.999999999" - case internal.LocalDate: - format = "2006-01-02" - case internal.LocalTime: - format = "15:04:05.999999999" - } - switch v.Location() { - default: - enc.wf(v.Format(format)) - case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: - enc.wf(v.In(time.UTC).Format(format)) - } - return - case Marshaler: - s, err := v.MarshalTOML() - if err != nil { - encPanic(err) - } - if s == nil { - encPanic(errors.New("MarshalTOML returned nil and no error")) - } - enc.w.Write(s) - return - case encoding.TextMarshaler: - s, err := v.MarshalText() - if err != nil { - encPanic(err) - } - if s == nil { - encPanic(errors.New("MarshalText returned nil and no error")) - } - enc.writeQuoted(string(s)) - return - case time.Duration: - enc.writeQuoted(v.String()) - return - case json.Number: - n, _ := rv.Interface().(json.Number) - - if n == "" { /// Useful zero value. 
- enc.w.WriteByte('0') - return - } else if v, err := n.Int64(); err == nil { - enc.eElement(reflect.ValueOf(v)) - return - } else if v, err := n.Float64(); err == nil { - enc.eElement(reflect.ValueOf(v)) - return - } - encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) - } - - switch rv.Kind() { - case reflect.Ptr: - enc.eElement(rv.Elem()) - return - case reflect.String: - enc.writeQuoted(rv.String()) - case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) - case reflect.Float32: - f := rv.Float() - if math.IsNaN(f) { - enc.wf("nan") - } else if math.IsInf(f, 0) { - enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) - } else { - enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) - } - case reflect.Float64: - f := rv.Float() - if math.IsNaN(f) { - enc.wf("nan") - } else if math.IsInf(f, 0) { - enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) - } else { - enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) - } - case reflect.Array, reflect.Slice: - enc.eArrayOrSliceElement(rv) - case reflect.Struct: - enc.eStruct(nil, rv, true) - case reflect.Map: - enc.eMap(nil, rv, true) - case reflect.Interface: - enc.eElement(rv.Elem()) - default: - encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) - } -} - -// By the TOML spec, all floats must have a decimal with at least one number on -// either side. -func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" - } - return fstr -} - -func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) -} - -func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { - length := rv.Len() - enc.wf("[") - for i := 0; i < length; i++ { - elem := eindirect(rv.Index(i)) - enc.eElement(elem) - if i != length-1 { - enc.wf(", ") - } - } - enc.wf("]") -} - -func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - for i := 0; i < rv.Len(); i++ { - trv := eindirect(rv.Index(i)) - if isNil(trv) { - continue - } - enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key) - enc.newline() - enc.eMapOrStruct(key, trv, false) - } -} - -func (enc *Encoder) eTable(key Key, rv reflect.Value) { - if len(key) == 1 { - // Output an extra newline between top-level tables. - // (The newline isn't written if nothing else has been written though.) - enc.newline() - } - if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key) - enc.newline() - } - enc.eMapOrStruct(key, rv, false) -} - -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { - switch rv.Kind() { - case reflect.Map: - enc.eMap(key, rv, inline) - case reflect.Struct: - enc.eStruct(key, rv, inline) - default: - // Should never happen? - panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) - } -} - -func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { - rt := rv.Type() - if rt.Key().Kind() != reflect.String { - encPanic(errNonString) - } - - // Sort keys so that we have deterministic output. And write keys directly - // underneath this key first, before writing sub-structs or sub-maps. 
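The sorting comment just above is what makes encoded maps reproducible: keys are sorted alphabetically, and keys holding plain values are written before keys that open sub-tables. A sketch of the resulting shape (the output is reconstructed from those rules, so treat the exact spacing as approximate):

```go
package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml"
)

func main() {
	m := map[string]interface{}{
		"zeta":  1,
		"alpha": 2,
		"sub":   map[string]interface{}{"x": true},
	}
	if err := toml.NewEncoder(os.Stdout).Encode(m); err != nil {
		log.Fatal(err)
	}
	// alpha = 2
	// zeta = 1
	//
	// [sub]
	//   x = true
}
```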
- var mapKeysDirect, mapKeysSub []string - for _, mapKey := range rv.MapKeys() { - k := mapKey.String() - if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { - mapKeysSub = append(mapKeysSub, k) - } else { - mapKeysDirect = append(mapKeysDirect, k) - } - } - - var writeMapKeys = func(mapKeys []string, trailC bool) { - sort.Strings(mapKeys) - for i, mapKey := range mapKeys { - val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) - if isNil(val) { - continue - } - - if inline { - enc.writeKeyValue(Key{mapKey}, val, true) - if trailC || i != len(mapKeys)-1 { - enc.wf(", ") - } - } else { - enc.encode(key.add(mapKey), val) - } - } - } - - if inline { - enc.wf("{") - } - writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) - writeMapKeys(mapKeysSub, false) - if inline { - enc.wf("}") - } -} - -const is32Bit = (32 << (^uint(0) >> 63)) == 32 - -func pointerTo(t reflect.Type) reflect.Type { - if t.Kind() == reflect.Ptr { - return pointerTo(t.Elem()) - } - return t -} - -func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { - // Write keys for fields directly under this key first, because if we write - // a field that creates a new table then all keys under it will be in that - // table (not the one we're writing here). - // - // Fields is a [][]int: for fieldsDirect this always has one entry (the - // struct index). For fieldsSub it contains two entries: the parent field - // index from tv, and the field indexes for the fields of the sub. - var ( - rt = rv.Type() - fieldsDirect, fieldsSub [][]int - addFields func(rt reflect.Type, rv reflect.Value, start []int) - ) - addFields = func(rt reflect.Type, rv reflect.Value, start []int) { - for i := 0; i < rt.NumField(); i++ { - f := rt.Field(i) - isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct - if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. - continue - } - opts := getOptions(f.Tag) - if opts.skip { - continue - } - - frv := eindirect(rv.Field(i)) - - // Treat anonymous struct fields with tag names as though they are - // not anonymous, like encoding/json does. - // - // Non-struct anonymous fields use the normal encoding logic. - if isEmbed { - if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { - addFields(frv.Type(), frv, append(start, f.Index...)) - continue - } - } - - if typeIsTable(tomlTypeOfGo(frv)) { - fieldsSub = append(fieldsSub, append(start, f.Index...)) - } else { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - if is32Bit { - copyStart := make([]int, len(start)) - copy(copyStart, start) - fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } - } - } - } - addFields(rt, rv, nil) - - writeFields := func(fields [][]int) { - for _, fieldIndex := range fields { - fieldType := rt.FieldByIndex(fieldIndex) - fieldVal := eindirect(rv.FieldByIndex(fieldIndex)) - - if isNil(fieldVal) { /// Don't write anything for nil fields. 
- continue - } - - opts := getOptions(fieldType.Tag) - if opts.skip { - continue - } - keyName := fieldType.Name - if opts.name != "" { - keyName = opts.name - } - - if opts.omitempty && enc.isEmpty(fieldVal) { - continue - } - if opts.omitzero && isZero(fieldVal) { - continue - } - - if inline { - enc.writeKeyValue(Key{keyName}, fieldVal, true) - if fieldIndex[0] != len(fields)-1 { - enc.wf(", ") - } - } else { - enc.encode(key.add(keyName), fieldVal) - } - } - } - - if inline { - enc.wf("{") - } - writeFields(fieldsDirect) - writeFields(fieldsSub) - if inline { - enc.wf("}") - } -} - -// tomlTypeOfGo returns the TOML type name of the Go value's type. -// -// It is used to determine whether the types of array elements are mixed (which -// is forbidden). If the Go value is nil, then it is illegal for it to be an -// array element, and valueIsNil is returned as true. -// -// The type may be `nil`, which means no concrete TOML type could be found. -func tomlTypeOfGo(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() { - return nil - } - - if rv.Kind() == reflect.Struct { - if rv.Type() == timeType { - return tomlDatetime - } - if isMarshaler(rv) { - return tomlString - } - return tomlHash - } - - if isMarshaler(rv) { - return tomlString - } - - switch rv.Kind() { - case reflect.Bool: - return tomlBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64: - return tomlInteger - case reflect.Float32, reflect.Float64: - return tomlFloat - case reflect.Array, reflect.Slice: - if isTableArray(rv) { - return tomlArrayHash - } - return tomlArray - case reflect.Ptr, reflect.Interface: - return tomlTypeOfGo(rv.Elem()) - case reflect.String: - return tomlString - case reflect.Map: - return tomlHash - default: - encPanic(errors.New("unsupported type: " + rv.Kind().String())) - panic("unreachable") - } -} - -func isMarshaler(rv reflect.Value) bool { - return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml) -} - -// isTableArray reports if all entries in the array or slice are a table. -func isTableArray(arr reflect.Value) bool { - if isNil(arr) || !arr.IsValid() || arr.Len() == 0 { - return false - } - - ret := true - for i := 0; i < arr.Len(); i++ { - tt := tomlTypeOfGo(eindirect(arr.Index(i))) - // Don't allow nil. 
- if tt == nil { - encPanic(errArrayNilElement) - } - - if ret && !typeEqual(tomlHash, tt) { - ret = false - } - } - return ret -} - -type tagOptions struct { - skip bool // "-" - name string - omitempty bool - omitzero bool -} - -func getOptions(tag reflect.StructTag) tagOptions { - t := tag.Get("toml") - if t == "-" { - return tagOptions{skip: true} - } - var opts tagOptions - parts := strings.Split(t, ",") - opts.name = parts[0] - for _, s := range parts[1:] { - switch s { - case "omitempty": - opts.omitempty = true - case "omitzero": - opts.omitzero = true - } - } - return opts -} - -func isZero(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return rv.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return rv.Uint() == 0 - case reflect.Float32, reflect.Float64: - return rv.Float() == 0.0 - } - return false -} - -func (enc *Encoder) isEmpty(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Array, reflect.Slice, reflect.Map, reflect.String: - return rv.Len() == 0 - case reflect.Struct: - if rv.Type().Comparable() { - return reflect.Zero(rv.Type()).Interface() == rv.Interface() - } - // Need to also check if all the fields are empty, otherwise something - // like this with uncomparable types will always return true: - // - // type a struct{ field b } - // type b struct{ s []string } - // s := a{field: b{s: []string{"AAA"}}} - for i := 0; i < rv.NumField(); i++ { - if !enc.isEmpty(rv.Field(i)) { - return false - } - } - return true - case reflect.Bool: - return !rv.Bool() - } - return false -} - -func (enc *Encoder) newline() { - if enc.hasWritten { - enc.wf("\n") - } -} - -// Write a key/value pair: -// -// key = -// -// This is also used for "k = v" in inline tables; so something like this will -// be written in three calls: -// -// ┌───────────────────┐ -// │ ┌───┐ ┌────┐│ -// v v v v vv -// key = {k = 1, k2 = 2} -func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { - if len(key) == 0 { - encPanic(errNoKey) - } - enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) - enc.eElement(val) - if !inline { - enc.newline() - } -} - -func (enc *Encoder) wf(format string, v ...interface{}) { - _, err := fmt.Fprintf(enc.w, format, v...) - if err != nil { - encPanic(err) - } - enc.hasWritten = true -} - -func (enc *Encoder) indentStr(key Key) string { - return strings.Repeat(enc.Indent, len(key)-1) -} - -func encPanic(err error) { - panic(tomlEncodeError{err}) -} - -// Resolve any level of pointers to the actual value (e.g. **string → string). -func eindirect(v reflect.Value) reflect.Value { - if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface { - if isMarshaler(v) { - return v - } - if v.CanAddr() { /// Special case for marshalers; see #358. 
- if pv := v.Addr(); isMarshaler(pv) { - return pv - } - } - return v - } - - if v.IsNil() { - return v - } - - return eindirect(v.Elem()) -} - -func isNil(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return rv.IsNil() - default: - return false - } -} diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go deleted file mode 100644 index f4f390e647..0000000000 --- a/vendor/github.com/BurntSushi/toml/error.go +++ /dev/null @@ -1,279 +0,0 @@ -package toml - -import ( - "fmt" - "strings" -) - -// ParseError is returned when there is an error parsing the TOML syntax such as -// invalid syntax, duplicate keys, etc. -// -// In addition to the error message itself, you can also print detailed location -// information with context by using [ErrorWithPosition]: -// -// toml: error: Key 'fruit' was already created and cannot be used as an array. -// -// At line 4, column 2-7: -// -// 2 | fruit = [] -// 3 | -// 4 | [[fruit]] # Not allowed -// ^^^^^ -// -// [ErrorWithUsage] can be used to print the above with some more detailed usage -// guidance: -// -// toml: error: newlines not allowed within inline tables -// -// At line 1, column 18: -// -// 1 | x = [{ key = 42 # -// ^ -// -// Error help: -// -// Inline tables must always be on a single line: -// -// table = {key = 42, second = 43} -// -// It is invalid to split them over multiple lines like so: -// -// # INVALID -// table = { -// key = 42, -// second = 43 -// } -// -// Use regular for this: -// -// [table] -// key = 42 -// second = 43 -type ParseError struct { - Message string // Short technical message. - Usage string // Longer message with usage guidance; may be blank. - Position Position // Position of the error - LastKey string // Last parsed key, may be blank. - - // Line the error occurred. - // - // Deprecated: use [Position]. - Line int - - err error - input string -} - -// Position of an error. -type Position struct { - Line int // Line number, starting at 1. - Start int // Start of error, as byte offset starting at 0. - Len int // Lenght in bytes. -} - -func (pe ParseError) Error() string { - msg := pe.Message - if msg == "" { // Error from errorf() - msg = pe.err.Error() - } - - if pe.LastKey == "" { - return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg) - } - return fmt.Sprintf("toml: line %d (last key %q): %s", - pe.Position.Line, pe.LastKey, msg) -} - -// ErrorWithUsage() returns the error with detailed location context. -// -// See the documentation on [ParseError]. -func (pe ParseError) ErrorWithPosition() string { - if pe.input == "" { // Should never happen, but just in case. - return pe.Error() - } - - var ( - lines = strings.Split(pe.input, "\n") - col = pe.column(lines) - b = new(strings.Builder) - ) - - msg := pe.Message - if msg == "" { - msg = pe.err.Error() - } - - // TODO: don't show control characters as literals? This may not show up - // well everywhere. 
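`ErrorWithPosition`, whose body continues below, backs the annotated output shown in the `ParseError` doc comment. Reaching it from calling code is a matter of unwrapping to the concrete type; a sketch reusing the inline-table example from that comment:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	// A newline inside an inline table is invalid TOML.
	_, err := toml.Decode("x = [{ key = 42\n", &v)

	var perr toml.ParseError
	if errors.As(err, &perr) {
		// Prints the error with line/column context plus usage guidance.
		fmt.Println(perr.ErrorWithUsage())
	}
}
```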
- - if pe.Position.Len == 1 { - fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", - msg, pe.Position.Line, col+1) - } else { - fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", - msg, pe.Position.Line, col, col+pe.Position.Len) - } - if pe.Position.Line > 2 { - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3]) - } - if pe.Position.Line > 1 { - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2]) - } - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1]) - fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len)) - return b.String() -} - -// ErrorWithUsage() returns the error with detailed location context and usage -// guidance. -// -// See the documentation on [ParseError]. -func (pe ParseError) ErrorWithUsage() string { - m := pe.ErrorWithPosition() - if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { - lines := strings.Split(strings.TrimSpace(u.Usage()), "\n") - for i := range lines { - if lines[i] != "" { - lines[i] = " " + lines[i] - } - } - return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n" - } - return m -} - -func (pe ParseError) column(lines []string) int { - var pos, col int - for i := range lines { - ll := len(lines[i]) + 1 // +1 for the removed newline - if pos+ll >= pe.Position.Start { - col = pe.Position.Start - pos - if col < 0 { // Should never happen, but just in case. - col = 0 - } - break - } - pos += ll - } - - return col -} - -type ( - errLexControl struct{ r rune } - errLexEscape struct{ r rune } - errLexUTF8 struct{ b byte } - errLexInvalidNum struct{ v string } - errLexInvalidDate struct{ v string } - errLexInlineTableNL struct{} - errLexStringNL struct{} - errParseRange struct { - i interface{} // int or float - size string // "int64", "uint16", etc. - } - errParseDuration struct{ d string } -) - -func (e errLexControl) Error() string { - return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r) -} -func (e errLexControl) Usage() string { return "" } - -func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) } -func (e errLexEscape) Usage() string { return usageEscape } -func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } -func (e errLexUTF8) Usage() string { return "" } -func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) } -func (e errLexInvalidNum) Usage() string { return "" } -func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) } -func (e errLexInvalidDate) Usage() string { return "" } -func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } -func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } -func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } -func (e errLexStringNL) Usage() string { return usageStringNewline } -func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } -func (e errParseRange) Usage() string { return usageIntOverflow } -func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } -func (e errParseDuration) Usage() string { return usageDuration } - -const usageEscape = ` -A '\' inside a "-delimited string is interpreted as an escape character. 
- -The following escape sequences are supported: -\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX - -To prevent a '\' from being recognized as an escape character, use either: - -- a ' or '''-delimited string; escape characters aren't processed in them; or -- write two backslashes to get a single backslash: '\\'. - -If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/' -instead of '\' will usually also work: "C:/Users/martin". -` - -const usageInlineNewline = ` -Inline tables must always be on a single line: - - table = {key = 42, second = 43} - -It is invalid to split them over multiple lines like so: - - # INVALID - table = { - key = 42, - second = 43 - } - -Use regular tables for this: - - [table] - key = 42 - second = 43 -` - -const usageStringNewline = ` -Strings must always be on a single line, and cannot span more than one line: - - # INVALID - string = "Hello, - world!" - -Instead use """ or ''' to split strings over multiple lines: - - string = """Hello, - world!""" -` - -const usageIntOverflow = ` -This number is too large; this may be an error in the TOML, but it can also be a -bug in the program that uses too small of an integer. - -The maximum and minimum values are: - - size │ lowest │ highest - ───────┼────────────────┼────────── - int8 │ -128 │ 127 - int16 │ -32,768 │ 32,767 - int32 │ -2,147,483,648 │ 2,147,483,647 - int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷ - uint8 │ 0 │ 255 - uint16 │ 0 │ 65,535 - uint32 │ 0 │ 4,294,967,295 - uint64 │ 0 │ 1.8 × 10¹⁸ - -int refers to int32 on 32-bit systems and int64 on 64-bit systems. -` - -const usageDuration = ` -A duration must be given as "number<unit>", without any spaces. Valid units are: - - ns nanoseconds (billionth of a second) - us, µs microseconds (millionth of a second) - ms milliseconds (thousandth of a second) - s seconds - m minutes - h hours - -You can combine multiple units; for example "5m10s" for 5 minutes and 10 -seconds. -` diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go deleted file mode 100644 index 022f15bc2b..0000000000 --- a/vendor/github.com/BurntSushi/toml/internal/tz.go +++ /dev/null @@ -1,36 +0,0 @@ -package internal - -import "time" - -// Timezones used for local datetime, date, and time TOML types. -// -// The exact way times and dates without a timezone should be interpreted is not -// well-defined in the TOML specification and left to the implementation. These -// default to the current local timezone offset of the computer, but this can be -// changed by changing these variables before decoding. -// -// TODO: -// Ideally we'd like to offer people the ability to configure the used timezone -// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit -// tricky: the reason we use three different variables for this is to support -// round-tripping – without these specific TZ names we wouldn't know which -// format to use. -// -// There isn't a good way to encode this right now though, and passing this sort -// of information also ties in to various related issues such as string format -// encoding, encoding of comments, etc. -// -// So, for the time being, just put this in internal until we can write a good -// comprehensive API for doing all of this. -// -// The reason they're exported is that they're referred to from e.g. -// internal/tag. -// -// Note that this behaviour is valid according to the TOML spec as the exact -// behaviour is left up to implementations.
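-//
-// For example, a TOML document containing `d = 2006-01-02` decodes to a
-// time.Time in the LocalDate zone, and `t = 15:04:05` to one in LocalTime
-// (see dtTypes in parse.go).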
-var ( - localOffset = func() int { _, o := time.Now().Zone(); return o }() - LocalDatetime = time.FixedZone("datetime-local", localOffset) - LocalDate = time.FixedZone("date-local", localOffset) - LocalTime = time.FixedZone("time-local", localOffset) -) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go deleted file mode 100644 index d4d70871d8..0000000000 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ /dev/null @@ -1,1233 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "runtime" - "strings" - "unicode" - "unicode/utf8" -) - -type itemType int - -const ( - itemError itemType = iota - itemNIL // used in the parser to indicate no type - itemEOF - itemText - itemString - itemRawString - itemMultilineString - itemRawMultilineString - itemBool - itemInteger - itemFloat - itemDatetime - itemArray // the start of an array - itemArrayEnd - itemTableStart - itemTableEnd - itemArrayTableStart - itemArrayTableEnd - itemKeyStart - itemKeyEnd - itemCommentStart - itemInlineTableStart - itemInlineTableEnd -) - -const eof = 0 - -type stateFn func(lx *lexer) stateFn - -func (p Position) String() string { - return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) -} - -type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item - - // Allow for backing up up to 4 runes. This is necessary because TOML - // contains 3-rune tokens (""" and '''). - prevWidths [4]int - nprev int // how many of prevWidths are in use - atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. - - // A stack of state functions used to maintain context. - // - // The idea is to reuse parts of the state machine in various places. For - // example, values can appear at the top level or within arbitrarily nested - // arrays. The last state on the stack is used after a value has been lexed. - // Similarly for comments. - stack []stateFn -} - -type item struct { - typ itemType - val string - err error - pos Position -} - -func (lx *lexer) nextItem() item { - for { - select { - case item := <-lx.items: - return item - default: - lx.state = lx.state(lx) - //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) - } - } -} - -func lex(input string) *lexer { - lx := &lexer{ - input: input, - state: lexTop, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - line: 1, - } - return lx -} - -func (lx *lexer) push(state stateFn) { - lx.stack = append(lx.stack, state) -} - -func (lx *lexer) pop() stateFn { - if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop") - } - last := lx.stack[len(lx.stack)-1] - lx.stack = lx.stack[0 : len(lx.stack)-1] - return last -} - -func (lx *lexer) current() string { - return lx.input[lx.start:lx.pos] -} - -func (lx lexer) getPos() Position { - p := Position{ - Line: lx.line, - Start: lx.start, - Len: lx.pos - lx.start, - } - if p.Len <= 0 { - p.Len = 1 - } - return p -} - -func (lx *lexer) emit(typ itemType) { - // Needed for multiline strings ending with an incomplete UTF-8 sequence. 
- if lx.start > lx.pos { - lx.error(errLexUTF8{lx.input[lx.pos]}) - return - } - lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} - lx.start = lx.pos -} - -func (lx *lexer) emitTrim(typ itemType) { - lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())} - lx.start = lx.pos -} - -func (lx *lexer) next() (r rune) { - if lx.atEOF { - panic("BUG in lexer: next called after EOF") - } - if lx.pos >= len(lx.input) { - lx.atEOF = true - return eof - } - - if lx.input[lx.pos] == '\n' { - lx.line++ - } - lx.prevWidths[3] = lx.prevWidths[2] - lx.prevWidths[2] = lx.prevWidths[1] - lx.prevWidths[1] = lx.prevWidths[0] - if lx.nprev < 4 { - lx.nprev++ - } - - r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) - if r == utf8.RuneError { - lx.error(errLexUTF8{lx.input[lx.pos]}) - return utf8.RuneError - } - - // Note: don't use peek() here, as this calls next(). - if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) { - lx.errorControlChar(r) - return utf8.RuneError - } - - lx.prevWidths[0] = w - lx.pos += w - return r -} - -// ignore skips over the pending input before this point. -func (lx *lexer) ignore() { - lx.start = lx.pos -} - -// backup steps back one rune. Can be called 4 times between calls to next. -func (lx *lexer) backup() { - if lx.atEOF { - lx.atEOF = false - return - } - if lx.nprev < 1 { - panic("BUG in lexer: backed up too far") - } - w := lx.prevWidths[0] - lx.prevWidths[0] = lx.prevWidths[1] - lx.prevWidths[1] = lx.prevWidths[2] - lx.prevWidths[2] = lx.prevWidths[3] - lx.nprev-- - - lx.pos -= w - if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { - lx.line-- - } -} - -// accept consumes the next rune if it's equal to `valid`. -func (lx *lexer) accept(valid rune) bool { - if lx.next() == valid { - return true - } - lx.backup() - return false -} - -// peek returns but does not consume the next rune in the input. -func (lx *lexer) peek() rune { - r := lx.next() - lx.backup() - return r -} - -// skip ignores all input that matches the given predicate. -func (lx *lexer) skip(pred func(rune) bool) { - for { - r := lx.next() - if pred(r) { - continue - } - lx.backup() - lx.ignore() - return - } -} - -// error stops all lexing by emitting an error and returning `nil`. -// -// Note that any value that is a character is escaped if it's a special -// character (newlines, tabs, etc.). -func (lx *lexer) error(err error) stateFn { - if lx.atEOF { - return lx.errorPrevLine(err) - } - lx.items <- item{typ: itemError, pos: lx.getPos(), err: err} - return nil -} - -// errorPrevLine is like error(), but sets the position to the last column of -// the previous line. -// -// This is so that unexpected EOF or NL errors don't show on a new blank line. -func (lx *lexer) errorPrevLine(err error) stateFn { - pos := lx.getPos() - pos.Line-- - pos.Len = 1 - pos.Start = lx.pos - 1 - lx.items <- item{typ: itemError, pos: pos, err: err} - return nil -} - -// errorPos is like error(), but allows explicitly setting the position. -func (lx *lexer) errorPos(start, length int, err error) stateFn { - pos := lx.getPos() - pos.Start = start - pos.Len = length - lx.items <- item{typ: itemError, pos: pos, err: err} - return nil -} - -// errorf is like error, and creates a new error.
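-//
-// Like error(), it is meant to be used as `return lx.errorf(...)`: the error
-// item is emitted and the nil state it returns stops the lexing.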
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - if lx.atEOF { - pos := lx.getPos() - pos.Line-- - pos.Len = 1 - pos.Start = lx.pos - 1 - lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} - return nil - } - lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} - return nil -} - -func (lx *lexer) errorControlChar(cc rune) stateFn { - return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) -} - -// lexTop consumes elements at the top level of TOML data. -func lexTop(lx *lexer) stateFn { - r := lx.next() - if isWhitespace(r) || isNL(r) { - return lexSkip(lx, lexTop) - } - switch r { - case '#': - lx.push(lexTop) - return lexCommentStart - case '[': - return lexTableStart - case eof: - if lx.pos > lx.start { - return lx.errorf("unexpected EOF") - } - lx.emit(itemEOF) - return nil - } - - // At this point, the only valid item can be a key, so we back up - // and let the key lexer do the rest. - lx.backup() - lx.push(lexTopEnd) - return lexKeyStart -} - -// lexTopEnd is entered whenever a top-level item has been consumed. (A value -// or a table.) It must see only whitespace, and will turn back to lexTop -// upon a newline. If it sees EOF, it will quit the lexer successfully. -func lexTopEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case r == '#': - // a comment will read to a newline for us. - lx.push(lexTop) - return lexCommentStart - case isWhitespace(r): - return lexTopEnd - case isNL(r): - lx.ignore() - return lexTop - case r == eof: - lx.emit(itemEOF) - return nil - } - return lx.errorf( - "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", - r) -} - -// lexTableStart lexes the beginning of a table. Namely, it makes sure that -// it starts with a character other than '.' and ']'. -// It assumes that '[' has already been consumed. -// It also handles the case that this is an item in an array of tables. -// e.g., '[[name]]'. -func lexTableStart(lx *lexer) stateFn { - if lx.peek() == '[' { - lx.next() - lx.emit(itemArrayTableStart) - lx.push(lexArrayTableEnd) - } else { - lx.emit(itemTableStart) - lx.push(lexTableEnd) - } - return lexTableNameStart -} - -func lexTableEnd(lx *lexer) stateFn { - lx.emit(itemTableEnd) - return lexTopEnd -} - -func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != ']' { - return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r) - } - lx.emit(itemArrayTableEnd) - return lexTopEnd -} - -func lexTableNameStart(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.peek(); { - case r == ']' || r == eof: - return lx.errorf("unexpected end of table name (table names cannot be empty)") - case r == '.': - return lx.errorf("unexpected table separator (table names cannot be empty)") - case r == '"' || r == '\'': - lx.ignore() - lx.push(lexTableNameEnd) - return lexQuotedName - default: - lx.push(lexTableNameEnd) - return lexBareName - } -} - -// lexTableNameEnd reads the end of a piece of a table name, optionally -// consuming whitespace. -func lexTableNameEnd(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.next(); { - case isWhitespace(r): - return lexTableNameEnd - case r == '.': - lx.ignore() - return lexTableNameStart - case r == ']': - return lx.pop() - default: - return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r) - } -} - -// lexBareName lexes one part of a key or table.
-// -// It assumes that at least one valid character for the table has already been -// read. -// -// Lexes only one part, e.g. only 'a' inside 'a.b'. -func lexBareName(lx *lexer) stateFn { - r := lx.next() - if isBareKeyChar(r) { - return lexBareName - } - lx.backup() - lx.emit(itemText) - return lx.pop() -} - -// lexQuotedName lexes one quoted part of a key or table. -// -// It assumes that at least one valid character for the table has already been -// read. -// -// Lexes only one part, e.g. only '"a"' inside '"a".b'. -func lexQuotedName(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexValue) - case r == '"': - lx.ignore() // ignore the '"' - return lexString - case r == '\'': - lx.ignore() // ignore the "'" - return lexRawString - case r == eof: - return lx.errorf("unexpected EOF; expected value") - default: - return lx.errorf("expected value but found %q instead", r) - } -} - -// lexKeyStart consumes all key parts until a '='. -func lexKeyStart(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.peek(); { - case r == '=' || r == eof: - return lx.errorf("unexpected '=': key name appears blank") - case r == '.': - return lx.errorf("unexpected '.': keys cannot start with a '.'") - case r == '"' || r == '\'': - lx.ignore() - fallthrough - default: // Bare key - lx.emit(itemKeyStart) - return lexKeyNameStart - } -} - -func lexKeyNameStart(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.peek(); { - case r == '=' || r == eof: - return lx.errorf("unexpected '='") - case r == '.': - return lx.errorf("unexpected '.'") - case r == '"' || r == '\'': - lx.ignore() - lx.push(lexKeyEnd) - return lexQuotedName - default: - lx.push(lexKeyEnd) - return lexBareName - } -} - -// lexKeyEnd consumes the end of a key and trims whitespace (up to the key -// separator). -func lexKeyEnd(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.next(); { - case isWhitespace(r): - return lexSkip(lx, lexKeyEnd) - case r == eof: - return lx.errorf("unexpected EOF; expected key separator '='") - case r == '.': - lx.ignore() - return lexKeyNameStart - case r == '=': - lx.emit(itemKeyEnd) - return lexSkip(lx, lexValue) - default: - return lx.errorf("expected '.' or '=', but got %q instead", r) - } -} - -// lexValue starts the consumption of a value anywhere a value is expected. -// lexValue will ignore whitespace. -// After a value is lexed, the last state on the stack is popped and returned. -func lexValue(lx *lexer) stateFn { - // We allow whitespace to precede a value, but NOT newlines. - // In array syntax, the array states are responsible for ignoring newlines.
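-	//
-	// A value can appear at the top level or nested inside arrays and inline
-	// tables; callers push the state to return to first, e.g. the pattern
-	// used by lexArrayValue below:
-	//
-	//	lx.push(lexArrayValueEnd) // resume here once the value is lexed
-	//	return lexValue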
- r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexValue) - case isDigit(r): - lx.backup() // avoid an extra state and use the same as above - return lexNumberOrDateStart - } - switch r { - case '[': - lx.ignore() - lx.emit(itemArray) - return lexArrayValue - case '{': - lx.ignore() - lx.emit(itemInlineTableStart) - return lexInlineTableValue - case '"': - if lx.accept('"') { - if lx.accept('"') { - lx.ignore() // Ignore """ - return lexMultilineString - } - lx.backup() - } - lx.ignore() // ignore the '"' - return lexString - case '\'': - if lx.accept('\'') { - if lx.accept('\'') { - lx.ignore() // Ignore ''' - return lexMultilineRawString - } - lx.backup() - } - lx.ignore() // ignore the "'" - return lexRawString - case '.': // special error case, be kind to users - return lx.errorf("floats must start with a digit, not '.'") - case 'i', 'n': - if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { - lx.emit(itemFloat) - return lx.pop() - } - case '-', '+': - return lexDecimalNumberStart - } - if unicode.IsLetter(r) { - // Be permissive here; lexBool will give a nice error if the - // user wrote something like - // x = foo - // (i.e. not 'true' or 'false' but something else word-like.) - lx.backup() - return lexBool - } - if r == eof { - return lx.errorf("unexpected EOF; expected value") - } - return lx.errorf("expected value but found %q instead", r) -} - -// lexArrayValue consumes one value in an array. It assumes that '[' or ',' -// have already been consumed. All whitespace and newlines are ignored. -func lexArrayValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValue) - case r == '#': - lx.push(lexArrayValue) - return lexCommentStart - case r == ',': - return lx.errorf("unexpected comma") - case r == ']': - return lexArrayEnd - } - - lx.backup() - lx.push(lexArrayValueEnd) - return lexValue -} - -// lexArrayValueEnd consumes everything between the end of an array value and -// the next value (or the end of the array): it ignores whitespace and newlines -// and expects either a ',' or a ']'. -func lexArrayValueEnd(lx *lexer) stateFn { - switch r := lx.next(); { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValueEnd) - case r == '#': - lx.push(lexArrayValueEnd) - return lexCommentStart - case r == ',': - lx.ignore() - return lexArrayValue // move on to the next value - case r == ']': - return lexArrayEnd - default: - return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) - } -} - -// lexArrayEnd finishes the lexing of an array. -// It assumes that a ']' has just been consumed. -func lexArrayEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemArrayEnd) - return lx.pop() -} - -// lexInlineTableValue consumes one key/value pair in an inline table. -// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
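-//
-// For example, in `point = {x = 1, y = 2}` this state is entered right after
-// the '{' and again after the ','.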
-func lexInlineTableValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexInlineTableValue) - case isNL(r): - return lx.errorPrevLine(errLexInlineTableNL{}) - case r == '#': - lx.push(lexInlineTableValue) - return lexCommentStart - case r == ',': - return lx.errorf("unexpected comma") - case r == '}': - return lexInlineTableEnd - } - lx.backup() - lx.push(lexInlineTableValueEnd) - return lexKeyStart -} - -// lexInlineTableValueEnd consumes everything between the end of an inline table -// key/value pair and the next pair (or the end of the table): -// it ignores whitespace and expects either a ',' or a '}'. -func lexInlineTableValueEnd(lx *lexer) stateFn { - switch r := lx.next(); { - case isWhitespace(r): - return lexSkip(lx, lexInlineTableValueEnd) - case isNL(r): - return lx.errorPrevLine(errLexInlineTableNL{}) - case r == '#': - lx.push(lexInlineTableValueEnd) - return lexCommentStart - case r == ',': - lx.ignore() - lx.skip(isWhitespace) - if lx.peek() == '}' { - return lx.errorf("trailing comma not allowed in inline tables") - } - return lexInlineTableValue - case r == '}': - return lexInlineTableEnd - default: - return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) - } -} - -func runeOrEOF(r rune) string { - if r == eof { - return "end of file" - } - return "'" + string(r) + "'" -} - -// lexInlineTableEnd finishes the lexing of an inline table. -// It assumes that a '}' has just been consumed. -func lexInlineTableEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemInlineTableEnd) - return lx.pop() -} - -// lexString consumes the inner contents of a string. It assumes that the -// beginning '"' has already been consumed and ignored. -func lexString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == eof: - return lx.errorf(`unexpected EOF; expected '"'`) - case isNL(r): - return lx.errorPrevLine(errLexStringNL{}) - case r == '\\': - lx.push(lexString) - return lexStringEscape - case r == '"': - lx.backup() - lx.emit(itemString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexString -} - -// lexMultilineString consumes the inner contents of a string. It assumes that -// the beginning '"""' has already been consumed and ignored. -func lexMultilineString(lx *lexer) stateFn { - r := lx.next() - switch r { - default: - return lexMultilineString - case eof: - return lx.errorf(`unexpected EOF; expected '"""'`) - case '\\': - return lexMultilineStringEscape - case '"': - /// Found " → try to read two more "". - if lx.accept('"') { - if lx.accept('"') { - /// Peek ahead: the string can contain " and "", including at the - /// end: """str""""" - /// 6 or more at the end, however, is an error. - if lx.peek() == '"' { - /// Check if we already lexed 5 '"'s; if so we have 6 now, and - /// that's just too many man! - /// - /// Second check is for the edge case: - /// - /// two quotes allowed. - /// vv - /// """lol \"""""" - /// ^^ ^^^---- closing three - /// escaped - /// - /// Bit ugly, but it works - if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { - return lx.errorf(`unexpected '""""""'`) - } - lx.backup() - lx.backup() - return lexMultilineString - } - - lx.backup() /// backup: don't include the """ in the item. - lx.backup() - lx.backup() - lx.emit(itemMultilineString) - lx.next() /// Read over """ again and discard it.
- lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - return lexMultilineString - } -} - -// lexRawString consumes a raw string. Nothing can be escaped in such a string. -// It assumes that the beginning "'" has already been consumed and ignored. -func lexRawString(lx *lexer) stateFn { - r := lx.next() - switch { - default: - return lexRawString - case r == eof: - return lx.errorf(`unexpected EOF; expected "'"`) - case isNL(r): - return lx.errorPrevLine(errLexStringNL{}) - case r == '\'': - lx.backup() - lx.emit(itemRawString) - lx.next() - lx.ignore() - return lx.pop() - } -} - -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning ''' has already been consumed and -// ignored. -func lexMultilineRawString(lx *lexer) stateFn { - r := lx.next() - switch r { - default: - return lexMultilineRawString - case eof: - return lx.errorf(`unexpected EOF; expected "'''"`) - case '\'': - /// Found ' → try to read two more ''. - if lx.accept('\'') { - if lx.accept('\'') { - /// Peek ahead: the string can contain ' and '', including at the - /// end: '''str''''' - /// 6 or more at the end, however, is an error. - if lx.peek() == '\'' { - /// Check if we already lexed 5 's; if so we have 6 now, and - /// that's just too many man! - if strings.HasSuffix(lx.current(), "'''''") { - return lx.errorf(`unexpected "''''''"`) - } - lx.backup() - lx.backup() - return lexMultilineRawString - } - - lx.backup() /// backup: don't include the ''' in the item. - lx.backup() - lx.backup() - lx.emit(itemRawMultilineString) - lx.next() /// Read over ''' again and discard it. - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - return lexMultilineRawString - } -} - -// lexMultilineStringEscape consumes an escaped character. It assumes that the -// preceding '\\' has already been consumed. -func lexMultilineStringEscape(lx *lexer) stateFn { - if isNL(lx.next()) { /// \ escaping newline. - return lexMultilineString - } - lx.backup() - lx.push(lexMultilineString) - return lexStringEscape(lx) -} - -func lexStringEscape(lx *lexer) stateFn { - r := lx.next() - switch r { - case 'b': - fallthrough - case 't': - fallthrough - case 'n': - fallthrough - case 'f': - fallthrough - case 'r': - fallthrough - case '"': - fallthrough - case ' ', '\t': - // Inside """ .. """ strings you can use \ to escape newlines, and any - // amount of whitespace can be between the \ and \n. - fallthrough - case '\\': - return lx.pop() - case 'u': - return lexShortUnicodeEscape - case 'U': - return lexLongUnicodeEscape - } - return lx.error(errLexEscape{r}) -} - -func lexShortUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 4; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected four hexadecimal digits after '\u', but got %q instead`, - lx.current()) - } - } - return lx.pop() -} - -func lexLongUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 8; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected eight hexadecimal digits after '\U', but got %q instead`, - lx.current()) - } - } - return lx.pop() -} - -// lexNumberOrDateStart processes the first character of a value which begins -// with a digit. It exists to catch values starting with '0', so that -// lexBaseNumberOrDate can differentiate base prefixed integers from other -// types. 
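-//
-// For example, a leading '0' may start '0x2f' (hex), '0.5' (float), or
-// '0001-02-03' (date); decimal integers with a leading zero are rejected
-// later, by the parser.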
-func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - switch r { - case '0': - return lexBaseNumberOrDate - } - - if !isDigit(r) { - // The only way to reach this state is if the value starts - // with a digit, so specifically treat anything else as an - // error. - return lx.errorf("expected a digit but got %q", r) - } - - return lexNumberOrDate -} - -// lexNumberOrDate consumes either an integer, float or datetime. -func lexNumberOrDate(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } - switch r { - case '-', ':': - return lexDatetime - case '_': - return lexDecimalNumber - case '.', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDatetime consumes a Datetime, to a first approximation. -// The parser validates that it matches one of the accepted formats. -func lexDatetime(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexDatetime - } - switch r { - case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': - return lexDatetime - } - - lx.backup() - lx.emitTrim(itemDatetime) - return lx.pop() -} - -// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. -func lexHexInteger(lx *lexer) stateFn { - r := lx.next() - if isHexadecimal(r) { - return lexHexInteger - } - switch r { - case '_': - return lexHexInteger - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. -func lexOctalInteger(lx *lexer) stateFn { - r := lx.next() - if isOctal(r) { - return lexOctalInteger - } - switch r { - case '_': - return lexOctalInteger - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. -func lexBinaryInteger(lx *lexer) stateFn { - r := lx.next() - if isBinary(r) { - return lexBinaryInteger - } - switch r { - case '_': - return lexBinaryInteger - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDecimalNumber consumes a decimal float or integer. -func lexDecimalNumber(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexDecimalNumber - } - switch r { - case '.', 'e', 'E': - return lexFloat - case '_': - return lexDecimalNumber - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDecimalNumberStart consumes the first digit of a number beginning with a sign. -// It assumes the sign has already been consumed. Values which start with a sign -// are only allowed to be decimal integers or floats. -// -// The special "nan" and "inf" values are also recognized. -func lexDecimalNumberStart(lx *lexer) stateFn { - r := lx.next() - - // Special error cases to give users better error messages - switch r { - case 'i': - if !lx.accept('n') || !lx.accept('f') { - return lx.errorf("invalid float: '%s'", lx.current()) - } - lx.emit(itemFloat) - return lx.pop() - case 'n': - if !lx.accept('a') || !lx.accept('n') { - return lx.errorf("invalid float: '%s'", lx.current()) - } - lx.emit(itemFloat) - return lx.pop() - case '0': - p := lx.peek() - switch p { - case 'b', 'o', 'x': - return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) - } - case '.': - return lx.errorf("floats must start with a digit, not '.'") - } - - if isDigit(r) { - return lexDecimalNumber - } - - return lx.errorf("expected a digit but got %q", r) -} - -// lexBaseNumberOrDate differentiates between the possible values which -// start with '0'.
It assumes that before reaching this state, the initial '0' -// has been consumed. -func lexBaseNumberOrDate(lx *lexer) stateFn { - r := lx.next() - // Note: All datetimes start with at least two digits, so we don't - // handle date characters (':', '-', etc.) here. - if isDigit(r) { - return lexNumberOrDate - } - switch r { - case '_': - // Can only be decimal, because there can't be an underscore - // between the '0' and the base designator, and dates can't - // contain underscores. - return lexDecimalNumber - case '.', 'e', 'E': - return lexFloat - case 'b': - r = lx.peek() - if !isBinary(r) { - return lx.errorf("not a binary number: '%s%c'", lx.current(), r) - } - return lexBinaryInteger - case 'o': - r = lx.peek() - if !isOctal(r) { - return lx.errorf("not an octal number: '%s%c'", lx.current(), r) - } - return lexOctalInteger - case 'x': - r = lx.peek() - if !isHexadecimal(r) { - return lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) - } - return lexHexInteger - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexFloat consumes the elements of a float. It allows any sequence of -// float-like characters, so floats emitted by the lexer are only a first -// approximation and must be validated by the parser. -func lexFloat(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexFloat - } - switch r { - case '_', '.', '-', '+', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemFloat) - return lx.pop() -} - -// lexBool consumes a bool string: 'true' or 'false'. -func lexBool(lx *lexer) stateFn { - var rs []rune - for { - r := lx.next() - if !unicode.IsLetter(r) { - lx.backup() - break - } - rs = append(rs, r) - } - s := string(rs) - switch s { - case "true", "false": - lx.emit(itemBool) - return lx.pop() - } - return lx.errorf("expected value but found %q instead", s) -} - -// lexCommentStart begins the lexing of a comment. It will emit -// itemCommentStart and consume no characters, passing control to lexComment. -func lexCommentStart(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemCommentStart) - return lexComment -} - -// lexComment lexes an entire comment. It assumes that '#' has been consumed. -// It will consume *up to* the first newline character, and pass control -// back to the last state on the stack. -func lexComment(lx *lexer) stateFn { - switch r := lx.next(); { - case isNL(r) || r == eof: - lx.backup() - lx.emit(itemText) - return lx.pop() - default: - return lexComment - } -} - -// lexSkip ignores all slurped input and moves on to the next state.
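-//
-// It is typically invoked as `return lexSkip(lx, nextState)` from within
-// another state function.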
-func lexSkip(lx *lexer, nextState stateFn) stateFn { - lx.ignore() - return nextState -} - -func (s stateFn) String() string { - name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() - if i := strings.LastIndexByte(name, '.'); i > -1 { - name = name[i+1:] - } - if s == nil { - name = "<nil>" - } - return name + "()" -} - -func (itype itemType) String() string { - switch itype { - case itemError: - return "Error" - case itemNIL: - return "NIL" - case itemEOF: - return "EOF" - case itemText: - return "Text" - case itemString, itemRawString, itemMultilineString, itemRawMultilineString: - return "String" - case itemBool: - return "Bool" - case itemInteger: - return "Integer" - case itemFloat: - return "Float" - case itemDatetime: - return "DateTime" - case itemTableStart: - return "TableStart" - case itemTableEnd: - return "TableEnd" - case itemKeyStart: - return "KeyStart" - case itemKeyEnd: - return "KeyEnd" - case itemArray: - return "Array" - case itemArrayEnd: - return "ArrayEnd" - case itemCommentStart: - return "CommentStart" - case itemInlineTableStart: - return "InlineTableStart" - case itemInlineTableEnd: - return "InlineTableEnd" - } - panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) -} - -func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) -} - -func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } -func isNL(r rune) bool { return r == '\n' || r == '\r' } -func isControl(r rune) bool { // Control characters except \t, \r, \n - switch r { - case '\t', '\r', '\n': - return false - default: - return (r >= 0x00 && r <= 0x1f) || r == 0x7f - } -} -func isDigit(r rune) bool { return r >= '0' && r <= '9' } -func isBinary(r rune) bool { return r == '0' || r == '1' } -func isOctal(r rune) bool { return r >= '0' && r <= '7' } -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') -} -func isBareKeyChar(r rune) bool { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' -} diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go deleted file mode 100644 index 71847a0415..0000000000 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ /dev/null @@ -1,121 +0,0 @@ -package toml - -import ( - "strings" -) - -// MetaData allows access to meta information about TOML data that's not -// accessible otherwise. -// -// It allows checking if a key is defined in the TOML data, whether any keys -// were undecoded, and the TOML type of a key. -type MetaData struct { - context Key // Used only during decoding. - - keyInfo map[string]keyInfo - mapping map[string]interface{} - keys []Key - decoded map[string]struct{} - data []byte // Input file; for errors. -} - -// IsDefined reports if the key exists in the TOML data. -// -// The key should be specified hierarchically, for example to access the TOML -// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive. -// -// Returns false for an empty key. -func (md *MetaData) IsDefined(key ...string) bool { - if len(key) == 0 { - return false - } - - var ( - hash map[string]interface{} - ok bool - hashOrVal interface{} = md.mapping - ) - for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { - return false - } - if hashOrVal, ok = hash[k]; !ok { - return false - } - } - return true -} - -// Type returns a string representation of the type of the key specified.
-// -// Type will return the empty string if given an empty key or a key that does -// not exist. Keys are case sensitive. -func (md *MetaData) Type(key ...string) string { - if ki, ok := md.keyInfo[Key(key).String()]; ok { - return ki.tomlType.typeString() - } - return "" -} - -// Keys returns a slice of every key in the TOML data, including key groups. -// -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. The list will have the same -// order as the keys appeared in the TOML data. -// -// All keys returned are non-empty. -func (md *MetaData) Keys() []Key { - return md.keys -} - -// Undecoded returns all keys that have not been decoded in the order in which -// they appear in the original TOML document. -// -// This includes keys that haven't been decoded because of a [Primitive] value. -// Once the Primitive value is decoded, the keys will be considered decoded. -// -// Also note that decoding into an empty interface will result in no decoding, -// and so no keys will be considered decoded. -// -// In this sense, the Undecoded keys correspond to keys in the TOML document -// that do not have a concrete type in your representation. -func (md *MetaData) Undecoded() []Key { - undecoded := make([]Key, 0, len(md.keys)) - for _, key := range md.keys { - if _, ok := md.decoded[key.String()]; !ok { - undecoded = append(undecoded, key) - } - } - return undecoded -} - -// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get -// values of this type. -type Key []string - -func (k Key) String() string { - ss := make([]string, len(k)) - for i := range k { - ss[i] = k.maybeQuoted(i) - } - return strings.Join(ss, ".") -} - -func (k Key) maybeQuoted(i int) string { - if k[i] == "" { - return `""` - } - for _, c := range k[i] { - if !isBareKeyChar(c) { - return `"` + dblQuotedReplacer.Replace(k[i]) + `"` - } - } - return k[i] -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go deleted file mode 100644 index d2542d6f92..0000000000 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ /dev/null @@ -1,781 +0,0 @@ -package toml - -import ( - "fmt" - "strconv" - "strings" - "time" - "unicode/utf8" - - "github.com/BurntSushi/toml/internal" -) - -type parser struct { - lx *lexer - context Key // Full key for the current hash in scope. - currentKey string // Base key name for everything except hashes. - pos Position // Current position in the TOML file. - - ordered []Key // List of keys in the order that they appear in the TOML data. - - keyInfo map[string]keyInfo // Map keyname → info about the TOML key. - mapping map[string]interface{} // Map keyname → key value. - implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). -} - -type keyInfo struct { - pos Position - tomlType tomlType -} - -func parse(data string) (p *parser, err error) { - defer func() { - if r := recover(); r != nil { - if pErr, ok := r.(ParseError); ok { - pErr.input = data - err = pErr - return - } - panic(r) - } - }() - - // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() - // which mangles stuff. 
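-	// (The two prefixes checked below are the UTF-16 byte order marks; the
-	// UTF-8 BOM is the three-byte sequence 0xEF 0xBB 0xBF.)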
- if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { - data = data[2:] - } - - // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 - // file (second byte in surrogate pair being NULL). Again, do this here to - // avoid having to deal with UTF-8/16 stuff in the lexer. - ex := 6 - if len(data) < 6 { - ex = len(data) - } - if i := strings.IndexRune(data[:ex], 0); i > -1 { - return nil, ParseError{ - Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", - Position: Position{Line: 1, Start: i, Len: 1}, - Line: 1, - input: data, - } - } - - p = &parser{ - keyInfo: make(map[string]keyInfo), - mapping: make(map[string]interface{}), - lx: lex(data), - ordered: make([]Key, 0), - implicits: make(map[string]struct{}), - } - for { - item := p.next() - if item.typ == itemEOF { - break - } - p.topLevel(item) - } - - return p, nil -} - -func (p *parser) panicErr(it item, err error) { - panic(ParseError{ - err: err, - Position: it.pos, - Line: it.pos.Line, - LastKey: p.current(), - }) -} - -func (p *parser) panicItemf(it item, format string, v ...interface{}) { - panic(ParseError{ - Message: fmt.Sprintf(format, v...), - Position: it.pos, - Line: it.pos.Line, - LastKey: p.current(), - }) -} - -func (p *parser) panicf(format string, v ...interface{}) { - panic(ParseError{ - Message: fmt.Sprintf(format, v...), - Position: p.pos, - Line: p.pos.Line, - LastKey: p.current(), - }) -} - -func (p *parser) next() item { - it := p.lx.nextItem() - //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) - if it.typ == itemError { - if it.err != nil { - panic(ParseError{ - Position: it.pos, - Line: it.pos.Line, - LastKey: p.current(), - err: it.err, - }) - } - - p.panicItemf(it, "%s", it.val) - } - return it -} - -func (p *parser) nextPos() item { - it := p.next() - p.pos = it.pos - return it -} - -func (p *parser) bug(format string, v ...interface{}) { - panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) -} - -func (p *parser) expect(typ itemType) item { - it := p.next() - p.assertEqual(typ, it.typ) - return it -} - -func (p *parser) assertEqual(expected, got itemType) { - if expected != got { - p.bug("Expected '%s' but got '%s'.", expected, got) - } -} - -func (p *parser) topLevel(item item) { - switch item.typ { - case itemCommentStart: // # .. - p.expect(itemText) - case itemTableStart: // [ .. ] - name := p.nextPos() - - var key Key - for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { - key = append(key, p.keyString(name)) - } - p.assertEqual(itemTableEnd, name.typ) - - p.addContext(key, false) - p.setType("", tomlHash, item.pos) - p.ordered = append(p.ordered, key) - case itemArrayTableStart: // [[ .. ]] - name := p.nextPos() - - var key Key - for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { - key = append(key, p.keyString(name)) - } - p.assertEqual(itemArrayTableEnd, name.typ) - - p.addContext(key, true) - p.setType("", tomlArrayHash, item.pos) - p.ordered = append(p.ordered, key) - case itemKeyStart: // key = .. - outerContext := p.context - /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') - k := p.nextPos() - var key Key - for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { - key = append(key, p.keyString(k)) - } - p.assertEqual(itemKeyEnd, k.typ) - - /// The current key is the last part. - p.currentKey = key[len(key)-1] - - /// All the other parts (if any) are the context; need to set each part - /// as implicit.
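-		/// For example, for `a.b.c = 1` the current key is 'c' and the
-		/// context is 'a.b'; 'a' and 'a.b' are marked implicit below.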
- context := key[:len(key)-1] - for i := range context { - p.addImplicitContext(append(p.context, context[i:i+1]...)) - } - - /// Set value. - vItem := p.next() - val, typ := p.value(vItem, false) - p.set(p.currentKey, val, typ, vItem.pos) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - - /// Remove the context we added (preserving any context from [tbl] lines). - p.context = outerContext - p.currentKey = "" - default: - p.bug("Unexpected type at top level: %s", item.typ) - } -} - -// Gets a string for a key (or part of a key in a table name). -func (p *parser) keyString(it item) string { - switch it.typ { - case itemText: - return it.val - case itemString, itemMultilineString, - itemRawString, itemRawMultilineString: - s, _ := p.value(it, false) - return s.(string) - default: - p.bug("Unexpected key type: %s", it.typ) - } - panic("unreachable") -} - -var datetimeRepl = strings.NewReplacer( - "z", "Z", - "t", "T", - " ", "T") - -// value translates an expected value from the lexer into a Go value wrapped -// as an empty interface. -func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { - switch it.typ { - case itemString: - return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) - case itemMultilineString: - return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) - case itemRawString: - return it.val, p.typeOfPrimitive(it) - case itemRawMultilineString: - return stripFirstNewline(it.val), p.typeOfPrimitive(it) - case itemInteger: - return p.valueInteger(it) - case itemFloat: - return p.valueFloat(it) - case itemBool: - switch it.val { - case "true": - return true, p.typeOfPrimitive(it) - case "false": - return false, p.typeOfPrimitive(it) - default: - p.bug("Expected boolean value, but got '%s'.", it.val) - } - case itemDatetime: - return p.valueDatetime(it) - case itemArray: - return p.valueArray(it) - case itemInlineTableStart: - return p.valueInlineTable(it, parentIsArray) - default: - p.bug("Unexpected value type: %s", it.typ) - } - panic("unreachable") -} - -func (p *parser) valueInteger(it item) (interface{}, tomlType) { - if !numUnderscoresOK(it.val) { - p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) - } - if numHasLeadingZero(it.val) { - p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) - } - - num, err := strconv.ParseInt(it.val, 0, 64) - if err != nil { - // Distinguish integer values. Normally, it'd be a bug if the lexer - // provides an invalid integer, but it's possible that the number is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. 
- if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - p.panicErr(it, errParseRange{i: it.val, size: "int64"}) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) -} - -func (p *parser) valueFloat(it item) (interface{}, tomlType) { - parts := strings.FieldsFunc(it.val, func(r rune) bool { - switch r { - case '.', 'e', 'E': - return true - } - return false - }) - for _, part := range parts { - if !numUnderscoresOK(part) { - p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) - } - } - if len(parts) > 0 && numHasLeadingZero(parts[0]) { - p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) - } - if !numPeriodsOK(it.val) { - // As a special case, numbers like '123.' or '1.e2', - // which are valid as far as Go/strconv are concerned, - // must be rejected because TOML says that a fractional - // part consists of '.' followed by 1+ digits. - p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) - } - val := strings.Replace(it.val, "_", "", -1) - if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. - val = "nan" - } - num, err := strconv.ParseFloat(val, 64) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - p.panicErr(it, errParseRange{i: it.val, size: "float64"}) - } else { - p.panicItemf(it, "Invalid float value: %q", it.val) - } - } - return num, p.typeOfPrimitive(it) -} - -var dtTypes = []struct { - fmt string - zone *time.Location -}{ - {time.RFC3339Nano, time.Local}, - {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, - {"2006-01-02", internal.LocalDate}, - {"15:04:05.999999999", internal.LocalTime}, -} - -func (p *parser) valueDatetime(it item) (interface{}, tomlType) { - it.val = datetimeRepl.Replace(it.val) - var ( - t time.Time - ok bool - err error - ) - for _, dt := range dtTypes { - t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) - if err == nil { - ok = true - break - } - } - if !ok { - p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) - } - return t, p.typeOfPrimitive(it) -} - -func (p *parser) valueArray(it item) (interface{}, tomlType) { - p.setType(p.currentKey, tomlArray, it.pos) - - var ( - types []tomlType - - // Initialize to a non-nil empty slice. This makes it consistent with - // how S = [] decodes into a non-nil slice inside something like struct - // { S []string }. See #338 - array = []interface{}{} - ) - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - val, typ := p.value(it, true) - array = append(array, val) - types = append(types, typ) - - // XXX: types isn't used here, we need it to record the accurate type - // information. - // - // Not entirely sure how to best store this; could use "key[0]", - // "key[1]" notation, or maybe store it on the Array type? - } - return array, tomlArray -} - -func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { - var ( - hash = make(map[string]interface{}) - outerContext = p.context - outerKey = p.currentKey - ) - - p.context = append(p.context, p.currentKey) - prevContext := p.context - p.currentKey = "" - - p.addImplicit(p.context) - p.addContext(p.context, parentIsArray) - - /// Loop over all table key/value pairs. 
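-	/// (e.g. both `a = 1` and `b.c = 2` in `tbl = {a = 1, b.c = 2}`).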
- for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - /// Read all key parts. - k := p.nextPos() - var key Key - for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { - key = append(key, p.keyString(k)) - } - p.assertEqual(itemKeyEnd, k.typ) - - /// The current key is the last part. - p.currentKey = key[len(key)-1] - - /// All the other parts (if any) are the context; need to set each part - /// as implicit. - context := key[:len(key)-1] - for i := range context { - p.addImplicitContext(append(p.context, context[i:i+1]...)) - } - - /// Set the value. - val, typ := p.value(p.next(), false) - p.set(p.currentKey, val, typ, it.pos) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - hash[p.currentKey] = val - - /// Restore context. - p.context = prevContext - } - p.context = outerContext - p.currentKey = outerKey - return hash, tomlHash -} - -// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', -// +/- signs, and base prefixes. -func numHasLeadingZero(s string) bool { - if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x - return true - } - if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' { - return true - } - return false -} - -// numUnderscoresOK checks whether each underscore in s is surrounded by -// characters that are not underscores. -func numUnderscoresOK(s string) bool { - switch s { - case "nan", "+nan", "-nan", "inf", "-inf", "+inf": - return true - } - accept := false - for _, r := range s { - if r == '_' { - if !accept { - return false - } - } - - // isHexadecimal is a superset of all the permissible characters - // surrounding an underscore. - accept = isHexadecimal(r) - } - return accept -} - -// numPeriodsOK checks whether every period in s is followed by a digit. -func numPeriodsOK(s string) bool { - period := false - for _, r := range s { - if period && !isDigit(r) { - return false - } - period = r == '.' - } - return !period -} - -// Set the current context of the parser, where the context is either a hash or -// an array of hashes, depending on the value of the `array` parameter. -// -// Establishing the context also makes sure that the key isn't a duplicate, and -// will create implicit hashes automatically. -func (p *parser) addContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. - hashContext := p.mapping - keyContext := make(Key, 0) - - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] - keyContext = append(keyContext, k) - - // No key? Make an implicit hash and move on. - if !ok { - p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) - } - - // If the hash context is actually an array of tables, then set - // the hash context to the last element in that array. - // - // Otherwise, it better be a table, since this MUST be a key group (by - // virtue of it not being the last element in a key). - switch t := hashContext[k].(type) { - case []map[string]interface{}: - hashContext = t[len(t)-1] - case map[string]interface{}: - hashContext = t - default: - p.panicf("Key '%s' was already created as a hash.", keyContext) - } - } - - p.context = keyContext - if array { - // If this is the first element for this array, then allocate a new - // list of tables for it.
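-		// (Arrays of tables are stored as []map[string]interface{}, one map
-		// appended per [[table]] header.)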
- k := key[len(key)-1] - if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 4) - } - - // Add a new table. But make sure the key hasn't already been used - // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) - } else { - p.panicf("Key '%s' was already created and cannot be used as an array.", key) - } - } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) - } - p.context = append(p.context, key[len(key)-1]) -} - -// set calls setValue and setType. -func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { - p.setValue(key, val) - p.setType(key, typ, pos) -} - -// setValue sets the given key to the given value in the current context. -// It will make sure that the key hasn't already been defined, and account for -// implicit key groups. -func (p *parser) setValue(key string, value interface{}) { - var ( - tmpHash interface{} - ok bool - hash = p.mapping - keyContext Key - ) - for _, k := range p.context { - keyContext = append(keyContext, k) - if tmpHash, ok = hash[k]; !ok { - p.bug("Context for key '%s' has not been established.", keyContext) - } - switch t := tmpHash.(type) { - case []map[string]interface{}: - // The context is a table of hashes. Pick the most recent table - // defined as the current hash. - hash = t[len(t)-1] - case map[string]interface{}: - hash = t - default: - p.panicf("Key '%s' has already been defined.", keyContext) - } - } - keyContext = append(keyContext, key) - - if _, ok := hash[key]; ok { - // Normally redefining keys isn't allowed, but the key could have been - // defined implicitly and it's allowed to be redefined concretely. (See - // the `valid/implicit-and-explicit-after.toml` in toml-test) - // - // But we have to make sure to stop marking it as an implicit. (So that - // another redefinition provokes an error.) - // - // Note that since it has already been defined (as a hash), we don't - // want to overwrite it. So our business is done. - if p.isArray(keyContext) { - p.removeImplicit(keyContext) - hash[key] = value - return - } - if p.isImplicit(keyContext) { - p.removeImplicit(keyContext) - return - } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. - p.panicf("Key '%s' has already been defined.", keyContext) - } - - hash[key] = value -} - -// setType sets the type of a particular value at a given key. It should be -// called immediately AFTER setValue. -// -// Note that if `key` is empty, then the type given will be applied to the -// current context (which is either a table or an array of tables). -func (p *parser) setType(key string, typ tomlType, pos Position) { - keyContext := make(Key, 0, len(p.context)+1) - keyContext = append(keyContext, p.context...) - if len(key) > 0 { // allow type setting for hashes - keyContext = append(keyContext, key) - } - // Special case to make empty keys ("" = 1) work. - // Without it, it will set "" rather than `""`. - // TODO: why is this needed? And why is this only needed here? - if len(keyContext) == 0 { - keyContext = Key{""} - } - p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos} -} - -// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and -// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
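-//
-// For example, `[a.b.c]` implicitly creates the 'a' and 'a.b' tables; they
-// may later be defined explicitly without triggering a duplicate-key error.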
-func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } -func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } -func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } -func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } -func (p *parser) addImplicitContext(key Key) { - p.addImplicit(key) - p.addContext(key, false) -} - -// current returns the full key name of the current context. -func (p *parser) current() string { - if len(p.currentKey) == 0 { - return p.context.String() - } - if len(p.context) == 0 { - return p.currentKey - } - return fmt.Sprintf("%s.%s", p.context, p.currentKey) -} - -func stripFirstNewline(s string) string { - if len(s) > 0 && s[0] == '\n' { - return s[1:] - } - if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { - return s[2:] - } - return s -} - -// Remove newlines inside triple-quoted strings if a line ends with "\". -func (p *parser) stripEscapedNewlines(s string) string { - split := strings.Split(s, "\n") - if len(split) < 1 { - return s - } - - escNL := false // Keep track of whether the last non-blank line was escaped. - for i, line := range split { - line = strings.TrimRight(line, " \t\r") - - if len(line) == 0 || line[len(line)-1] != '\\' { - split[i] = strings.TrimRight(split[i], "\r") - if !escNL && i != len(split)-1 { - split[i] += "\n" - } - continue - } - - escBS := true - for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- { - escBS = !escBS - } - if escNL { - line = strings.TrimLeft(line, " \t\r") - } - escNL = !escBS - - if escBS { - split[i] += "\n" - continue - } - - if i == len(split)-1 { - p.panicf("invalid escape: '\\ '") - } - - split[i] = line[:len(line)-1] // Remove \ - if len(split)-1 > i { - split[i+1] = strings.TrimLeft(split[i+1], " \t\r") - } - } - return strings.Join(split, "") -} - -func (p *parser) replaceEscapes(it item, str string) string { - replaced := make([]rune, 0, len(str)) - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) - continue - } - r += 1 - if r >= len(s) { - p.bug("Escape sequence at end of string.") - return "" - } - switch s[r] { - default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) - case ' ', '\t': - p.panicItemf(it, "invalid escape: '\\%c'", s[r]) - case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 - case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 - case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 - case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 - case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 - case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 - case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 - case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5]) - replaced = append(replaced, escaped) - r += 5 - case 'U': - // At this point, we know we have a Unicode escape of the form - // `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.)
- escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9]) - replaced = append(replaced, escaped) - r += 9 - } - } - return string(replaced) -} - -func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune { - s := string(bs) - hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) - if err != nil { - p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) - } - if !utf8.ValidRune(rune(hex)) { - p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s) - } - return rune(hex) -} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go deleted file mode 100644 index 254ca82e54..0000000000 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ /dev/null @@ -1,242 +0,0 @@ -package toml - -// Struct field handling is adapted from code in encoding/json: -// -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the Go distribution. - -import ( - "reflect" - "sort" - "sync" -) - -// A field represents a single field found in a struct. -type field struct { - name string // the name of the field (`toml` tag included) - tag bool // whether field has a `toml` tag - index []int // represents the depth of an anonymous field - typ reflect.Type // the type of the field -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from toml tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that TOML should recognize for the given -// type. The algorithm is breadth-first search over the set of structs to -// include - the top struct and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - var count map[reflect.Type]int - var nextCount map[reflect.Type]int - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. 
- for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" && !sf.Anonymous { // unexported - continue - } - opts := getOptions(sf.Tag) - if opts.skip { - continue - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := opts.name != "" - name := opts.name - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - f := field{name: ft.Name(), index: index, typ: ft} - next = append(next, f) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with TOML tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// TOML tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
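The `dominantField` logic above mirrors encoding/json's embedding rules: at equal depth, a `toml`-tagged field wins, while two untagged fields at the same depth conflict and are both dropped. A small sketch of the tagged-field promotion, with hypothetical types and assuming the package's `Decode` entry point:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type Base struct {
	Name string // untagged: loses to a tagged sibling at the same depth
}

type Other struct {
	Name string `toml:"name"` // tagged: dominates, so there is no conflict
}

type Config struct {
	Base
	Other
}

func main() {
	var c Config
	if _, err := toml.Decode(`name = "tagged wins"`, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%q %q\n", c.Base.Name, c.Other.Name) // "" "tagged wins"
}
```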
-func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go deleted file mode 100644 index 4e90d77373..0000000000 --- a/vendor/github.com/BurntSushi/toml/type_toml.go +++ /dev/null @@ -1,70 +0,0 @@ -package toml - -// tomlType represents any Go type that corresponds to a TOML type. -// While the first draft of the TOML spec has a simplistic type system that -// probably doesn't need this level of sophistication, we seem to be militating -// toward adding real composite types. -type tomlType interface { - typeString() string -} - -// typeEqual accepts any two types and returns true if they are equal. -func typeEqual(t1, t2 tomlType) bool { - if t1 == nil || t2 == nil { - return false - } - return t1.typeString() == t2.typeString() -} - -func typeIsTable(t tomlType) bool { - return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) -} - -type tomlBaseType string - -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} - -var ( - tomlInteger tomlBaseType = "Integer" - tomlFloat tomlBaseType = "Float" - tomlDatetime tomlBaseType = "Datetime" - tomlString tomlBaseType = "String" - tomlBool tomlBaseType = "Bool" - tomlArray tomlBaseType = "Array" - tomlHash tomlBaseType = "Hash" - tomlArrayHash tomlBaseType = "ArrayHash" -) - -// typeOfPrimitive returns a tomlType of any primitive value in TOML. -// Primitive values are: Integer, Float, Datetime, String and Bool. -// -// Passing a lexer item other than the following will cause a BUG message -// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. 
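`cachedTypeFields` above is a read-through cache: an optimistic read under `RLock`, computation outside any lock (duplicate work is possible but deterministic, so the last writer stores an identical value), then publication under `Lock`. The same pattern reduced to a standalone sketch, using only the standard library and a hypothetical `fieldNames` helper:

```go
package main

import (
	"fmt"
	"reflect"
	"sync"
)

var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]string
}

// fieldNames returns the struct's field names, caching per type. Duplicate
// computation under contention is harmless: the result is deterministic.
func fieldNames(t reflect.Type) []string {
	fieldCache.RLock()
	names := fieldCache.m[t]
	fieldCache.RUnlock()
	if names != nil {
		return names
	}

	// Compute without holding any lock.
	names = make([]string, 0, t.NumField())
	for i := 0; i < t.NumField(); i++ {
		names = append(names, t.Field(i).Name)
	}

	// Publish under the write lock.
	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]string{}
	}
	fieldCache.m[t] = names
	fieldCache.Unlock()
	return names
}

func main() {
	type point struct{ X, Y int }
	fmt.Println(fieldNames(reflect.TypeOf(point{}))) // [X Y]
}
```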
-func (p *parser) typeOfPrimitive(lexItem item) tomlType { - switch lexItem.typ { - case itemInteger: - return tomlInteger - case itemFloat: - return tomlFloat - case itemDatetime: - return tomlDatetime - case itemString: - return tomlString - case itemMultilineString: - return tomlString - case itemRawString: - return tomlString - case itemRawMultilineString: - return tomlString - case itemBool: - return tomlBool - } - p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) - panic("unreachable") -} diff --git a/vendor/github.com/Microsoft/go-winio/.gitattributes b/vendor/github.com/Microsoft/go-winio/.gitattributes deleted file mode 100644 index 94f480de94..0000000000 --- a/vendor/github.com/Microsoft/go-winio/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -* text=auto eol=lf \ No newline at end of file diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore deleted file mode 100644 index 815e20660e..0000000000 --- a/vendor/github.com/Microsoft/go-winio/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -.vscode/ - -*.exe - -# testing -testdata - -# go workspaces -go.work -go.work.sum diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml deleted file mode 100644 index 7b503d26a3..0000000000 --- a/vendor/github.com/Microsoft/go-winio/.golangci.yml +++ /dev/null @@ -1,149 +0,0 @@ -run: - skip-dirs: - - pkg/etw/sample - -linters: - enable: - # style - - containedctx # struct contains a context - - dupl # duplicate code - - errname # erorrs are named correctly - - nolintlint # "//nolint" directives are properly explained - - revive # golint replacement - - unconvert # unnecessary conversions - - wastedassign - - # bugs, performance, unused, etc ... 
- - contextcheck # function uses a non-inherited context - - errorlint # errors not wrapped for 1.13 - - exhaustive # check exhaustiveness of enum switch statements - - gofmt # files are gofmt'ed - - gosec # security - - nilerr # returns nil even with non-nil error - - unparam # unused function params - -issues: - exclude-rules: - # err is very often shadowed in nested scopes - - linters: - - govet - text: '^shadow: declaration of "err" shadows declaration' - - # ignore long lines for skip autogen directives - - linters: - - revive - text: "^line-length-limit: " - source: "^//(go:generate|sys) " - - #TODO: remove after upgrading to go1.18 - # ignore comment spacing for nolint and sys directives - - linters: - - revive - text: "^comment-spacings: no space between comment delimiter and comment text" - source: "//(cspell:|nolint:|sys |todo)" - - # not on go 1.18 yet, so no any - - linters: - - revive - text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'" - - # allow unjustified ignores of error checks in defer statements - - linters: - - nolintlint - text: "^directive `//nolint:errcheck` should provide explanation" - source: '^\s*defer ' - - # allow unjustified ignores of error lints for io.EOF - - linters: - - nolintlint - text: "^directive `//nolint:errorlint` should provide explanation" - source: '[=|!]= io.EOF' - - -linters-settings: - exhaustive: - default-signifies-exhaustive: true - govet: - enable-all: true - disable: - # struct order is often for Win32 compat - # also, ignore pointer bytes/GC issues for now until performance becomes an issue - - fieldalignment - check-shadowing: true - nolintlint: - allow-leading-space: false - require-explanation: true - require-specific: true - revive: - # revive is more configurable than static check, so likely the preferred alternative to static-check - # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997) - enable-all-rules: - true - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md - rules: - # rules with required arguments - - name: argument-limit - disabled: true - - name: banned-characters - disabled: true - - name: cognitive-complexity - disabled: true - - name: cyclomatic - disabled: true - - name: file-header - disabled: true - - name: function-length - disabled: true - - name: function-result-limit - disabled: true - - name: max-public-structs - disabled: true - # geneally annoying rules - - name: add-constant # complains about any and all strings and integers - disabled: true - - name: confusing-naming # we frequently use "Foo()" and "foo()" together - disabled: true - - name: flag-parameter # excessive, and a common idiom we use - disabled: true - - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead - disabled: true - # general config - - name: line-length-limit - arguments: - - 140 - - name: var-naming - arguments: - - [] - - - CID - - CRI - - CTRD - - DACL - - DLL - - DOS - - ETW - - FSCTL - - GCS - - GMSA - - HCS - - HV - - IO - - LCOW - - LDAP - - LPAC - - LTSC - - MMIO - - NT - - OCI - - PMEM - - PWSH - - RX - - SACl - - SID - - SMB - - TX - - VHD - - VHDX - - VMID - - VPCI - - WCOW - - WIM diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS deleted file mode 100644 index ae1b4942b9..0000000000 --- a/vendor/github.com/Microsoft/go-winio/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ - * @microsoft/containerplat diff --git 
a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE deleted file mode 100644 index b8b569d774..0000000000 --- a/vendor/github.com/Microsoft/go-winio/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md deleted file mode 100644 index 7474b4f0b6..0000000000 --- a/vendor/github.com/Microsoft/go-winio/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) - -This repository contains utilities for efficiently performing Win32 IO operations in -Go. Currently, this is focused on accessing named pipes and other file handles, and -for using named pipes as a net transport. - -This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go -to reuse the thread to schedule another goroutine. This limits support to Windows Vista and -newer operating systems. This is similar to the implementation of network sockets in Go's net -package. - -Please see the LICENSE file for licensing information. - -## Contributing - -This project welcomes contributions and suggestions. -Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that -you have the right to, and actually do, grant us the rights to use your contribution. -For details, visit [Microsoft CLA](https://cla.microsoft.com). - -When you submit a pull request, a CLA-bot will automatically determine whether you need to -provide a CLA and decorate the PR appropriately (e.g., label, comment). -Simply follow the instructions provided by the bot. -You will only need to do this once across all repos using our CLA. - -Additionally, the pull request pipeline requires the following steps to be performed before -merging. - -### Code Sign-Off - -We require that contributors sign their commits using [`git commit --signoff`][git-commit-s] -to certify they either authored the work themselves or otherwise have permission to use it in this project. - -A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
- -Please see [the developer certificate](https://developercertificate.org) for more info, -as well as to make sure that you can attest to the rules listed. -Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. - -### Linting - -Code must pass a linting stage, which uses [`golangci-lint`][lint]. -The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run -automatically with VSCode by adding the following to your workspace or folder settings: - -```json - "go.lintTool": "golangci-lint", - "go.lintOnSave": "package", -``` - -Additional editor [integrations options are also available][lint-ide]. - -Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root: - -```shell -# use . or specify a path to only lint a package -# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0" -> golangci-lint run ./... -``` - -### Go Generate - -The pipeline checks that auto-generated code, via `go generate`, are up to date. - -This can be done for the entire repo: - -```shell -> go generate ./... -``` - -## Code of Conduct - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - -## Special Thanks - -Thanks to [natefinch][natefinch] for the inspiration for this library. -See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation. - -[lint]: https://golangci-lint.run/ -[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration -[lint-install]: https://golangci-lint.run/usage/install/#local-installation - -[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s -[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff - -[natefinch]: https://github.com/natefinch diff --git a/vendor/github.com/Microsoft/go-winio/SECURITY.md b/vendor/github.com/Microsoft/go-winio/SECURITY.md deleted file mode 100644 index 869fdfe2b2..0000000000 --- a/vendor/github.com/Microsoft/go-winio/SECURITY.md +++ /dev/null @@ -1,41 +0,0 @@ - - -## Security - -Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). - -If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. - -## Reporting Security Issues - -**Please do not report security vulnerabilities through public GitHub issues.** - -Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). - -If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). 
If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). - -You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). - -Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: - - * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) - * Full paths of source file(s) related to the manifestation of the issue - * The location of the affected source code (tag/branch/commit or direct URL) - * Any special configuration required to reproduce the issue - * Step-by-step instructions to reproduce the issue - * Proof-of-concept or exploit code (if possible) - * Impact of the issue, including how an attacker might exploit the issue - -This information will help us triage your report more quickly. - -If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. - -## Preferred Languages - -We prefer all communications to be in English. - -## Policy - -Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). - - diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go deleted file mode 100644 index 09621c8846..0000000000 --- a/vendor/github.com/Microsoft/go-winio/backup.go +++ /dev/null @@ -1,290 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "os" - "runtime" - "syscall" - "unicode/utf16" - - "golang.org/x/sys/windows" -) - -//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead -//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite - -const ( - BackupData = uint32(iota + 1) - BackupEaData - BackupSecurity - BackupAlternateData - BackupLink - BackupPropertyData - BackupObjectId //revive:disable-line:var-naming ID, not Id - BackupReparseData - BackupSparseBlock - BackupTxfsData -) - -const ( - StreamSparseAttributes = uint32(8) -) - -//nolint:revive // var-naming: ALL_CAPS -const ( - WRITE_DAC = windows.WRITE_DAC - WRITE_OWNER = windows.WRITE_OWNER - ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY -) - -// BackupHeader represents a backup stream of a file. -type BackupHeader struct { - //revive:disable-next-line:var-naming ID, not Id - Id uint32 // The backup stream ID - Attributes uint32 // Stream attributes - Size int64 // The size of the stream in bytes - Name string // The name of the stream (for BackupAlternateData only). - Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). -} - -type win32StreamID struct { - StreamID uint32 - Attributes uint32 - Size uint64 - NameSize uint32 -} - -// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series -// of BackupHeader values. 
-type BackupStreamReader struct { - r io.Reader - bytesLeft int64 -} - -// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. -func NewBackupStreamReader(r io.Reader) *BackupStreamReader { - return &BackupStreamReader{r, 0} -} - -// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if -// it was not completely read. -func (r *BackupStreamReader) Next() (*BackupHeader, error) { - if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this - if s, ok := r.r.(io.Seeker); ok { - // Make sure Seek on io.SeekCurrent sometimes succeeds - // before trying the actual seek. - if _, err := s.Seek(0, io.SeekCurrent); err == nil { - if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { - return nil, err - } - r.bytesLeft = 0 - } - } - if _, err := io.Copy(io.Discard, r); err != nil { - return nil, err - } - } - var wsi win32StreamID - if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { - return nil, err - } - hdr := &BackupHeader{ - Id: wsi.StreamID, - Attributes: wsi.Attributes, - Size: int64(wsi.Size), - } - if wsi.NameSize != 0 { - name := make([]uint16, int(wsi.NameSize/2)) - if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { - return nil, err - } - hdr.Name = syscall.UTF16ToString(name) - } - if wsi.StreamID == BackupSparseBlock { - if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { - return nil, err - } - hdr.Size -= 8 - } - r.bytesLeft = hdr.Size - return hdr, nil -} - -// Read reads from the current backup stream. -func (r *BackupStreamReader) Read(b []byte) (int, error) { - if r.bytesLeft == 0 { - return 0, io.EOF - } - if int64(len(b)) > r.bytesLeft { - b = b[:r.bytesLeft] - } - n, err := r.r.Read(b) - r.bytesLeft -= int64(n) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } else if r.bytesLeft == 0 && err == nil { - err = io.EOF - } - return n, err -} - -// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. -type BackupStreamWriter struct { - w io.Writer - bytesLeft int64 -} - -// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. -func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { - return &BackupStreamWriter{w, 0} -} - -// WriteHeader writes the next backup stream header and prepares for calls to Write(). -func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { - if w.bytesLeft != 0 { - return fmt.Errorf("missing %d bytes", w.bytesLeft) - } - name := utf16.Encode([]rune(hdr.Name)) - wsi := win32StreamID{ - StreamID: hdr.Id, - Attributes: hdr.Attributes, - Size: uint64(hdr.Size), - NameSize: uint32(len(name) * 2), - } - if hdr.Id == BackupSparseBlock { - // Include space for the int64 block offset - wsi.Size += 8 - } - if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { - return err - } - if len(name) != 0 { - if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { - return err - } - } - if hdr.Id == BackupSparseBlock { - if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { - return err - } - } - w.bytesLeft = hdr.Size - return nil -} - -// Write writes to the current backup stream. -func (w *BackupStreamWriter) Write(b []byte) (int, error) { - if w.bytesLeft < int64(len(b)) { - return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) - } - n, err := w.w.Write(b) - w.bytesLeft -= int64(n) - return n, err -} - -// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. 
-type BackupFileReader struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, -// Read will attempt to read the security descriptor of the file. -func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { - r := &BackupFileReader{f, includeSecurity, 0} - return r -} - -// Read reads a backup stream from the file by calling the Win32 API BackupRead(). -func (r *BackupFileReader) Read(b []byte) (int, error) { - var bytesRead uint32 - err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) - if err != nil { - return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} - } - runtime.KeepAlive(r.f) - if bytesRead == 0 { - return 0, io.EOF - } - return int(bytesRead), nil -} - -// Close frees Win32 resources associated with the BackupFileReader. It does not close -// the underlying file. -func (r *BackupFileReader) Close() error { - if r.ctx != 0 { - _ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) - runtime.KeepAlive(r.f) - r.ctx = 0 - } - return nil -} - -// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. -type BackupFileWriter struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, -// Write() will attempt to restore the security descriptor from the stream. -func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { - w := &BackupFileWriter{f, includeSecurity, 0} - return w -} - -// Write restores a portion of the file using the provided backup stream. -func (w *BackupFileWriter) Write(b []byte) (int, error) { - var bytesWritten uint32 - err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) - if err != nil { - return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} - } - runtime.KeepAlive(w.f) - if int(bytesWritten) != len(b) { - return int(bytesWritten), errors.New("not all bytes could be written") - } - return len(b), nil -} - -// Close frees Win32 resources associated with the BackupFileWriter. It does not -// close the underlying file. -func (w *BackupFileWriter) Close() error { - if w.ctx != 0 { - _ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) - runtime.KeepAlive(w.f) - w.ctx = 0 - } - return nil -} - -// OpenForBackup opens a file or directory, potentially skipping access checks if the backup -// or restore privileges have been acquired. -// -// If the file opened was a directory, it cannot be used with Readdir(). 
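These helpers compose: `BackupFileReader` produces the raw BackupRead byte stream, and `BackupStreamReader` parses it into a sequence of `BackupHeader` values. A hedged sketch of that composition, enumerating a file's alternate data streams; the path is hypothetical, and the package remains importable from github.com/Microsoft/go-winio even after this un-vendoring:

```go
//go:build windows

package main

import (
	"errors"
	"fmt"
	"io"
	"os"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	f, err := os.Open(`C:\some\file.txt`) // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// false: skip the security descriptor stream.
	bfr := winio.NewBackupFileReader(f, false)
	defer bfr.Close()

	sr := winio.NewBackupStreamReader(bfr)
	for {
		hdr, err := sr.Next()
		if errors.Is(err, io.EOF) {
			break // no more streams
		}
		if err != nil {
			panic(err)
		}
		if hdr.Id == winio.BackupAlternateData {
			fmt.Printf("alternate stream %q: %d bytes\n", hdr.Name, hdr.Size)
		}
	}
}
```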
-func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
-	winPath, err := syscall.UTF16FromString(path)
-	if err != nil {
-		return nil, err
-	}
-	h, err := syscall.CreateFile(&winPath[0],
-		access,
-		share,
-		nil,
-		createmode,
-		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT,
-		0)
-	if err != nil {
-		err = &os.PathError{Op: "open", Path: path, Err: err}
-		return nil, err
-	}
-	return os.NewFile(uintptr(h), path), nil
-}
diff --git a/vendor/github.com/Microsoft/go-winio/doc.go b/vendor/github.com/Microsoft/go-winio/doc.go deleted file mode 100644 index 1f5bfe2d54..0000000000 --- a/vendor/github.com/Microsoft/go-winio/doc.go +++ /dev/null @@ -1,22 +0,0 @@
-// This package provides utilities for efficiently performing Win32 IO operations in Go.
-// Currently, this package provides support for general IO and management of
-// - named pipes
-// - files
-// - [Hyper-V sockets]
-//
-// This code is similar to Go's [net] package, and uses IO completion ports to avoid
-// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
-//
-// This limits support to Windows Vista and newer operating systems.
-//
-// Additionally, this package provides support for:
-// - creating and managing GUIDs
-// - writing to [ETW]
-// - opening and managing VHDs
-// - parsing [Windows Image files]
-// - auto-generating Win32 API code
-//
-// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
-// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
-// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
-package winio
diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go deleted file mode 100644 index e104dbdfdf..0000000000 --- a/vendor/github.com/Microsoft/go-winio/ea.go +++ /dev/null @@ -1,137 +0,0 @@
-package winio
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-)
-
-type fileFullEaInformation struct {
-	NextEntryOffset uint32
-	Flags           uint8
-	NameLength      uint8
-	ValueLength     uint16
-}
-
-var (
-	fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
-
-	errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
-	errEaNameTooLarge  = errors.New("extended attribute name too large")
-	errEaValueTooLarge = errors.New("extended attribute value too large")
-)
-
-// ExtendedAttribute represents a single Windows EA.
-type ExtendedAttribute struct { - Name string - Value []byte - Flags uint8 -} - -func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { - var info fileFullEaInformation - err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) - if err != nil { - err = errInvalidEaBuffer - return ea, nb, err - } - - nameOffset := fileFullEaInformationSize - nameLen := int(info.NameLength) - valueOffset := nameOffset + int(info.NameLength) + 1 - valueLen := int(info.ValueLength) - nextOffset := int(info.NextEntryOffset) - if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { - err = errInvalidEaBuffer - return ea, nb, err - } - - ea.Name = string(b[nameOffset : nameOffset+nameLen]) - ea.Value = b[valueOffset : valueOffset+valueLen] - ea.Flags = info.Flags - if info.NextEntryOffset != 0 { - nb = b[info.NextEntryOffset:] - } - return ea, nb, err -} - -// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION -// buffer retrieved from BackupRead, ZwQueryEaFile, etc. -func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { - for len(b) != 0 { - ea, nb, err := parseEa(b) - if err != nil { - return nil, err - } - - eas = append(eas, ea) - b = nb - } - return eas, err -} - -func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { - if int(uint8(len(ea.Name))) != len(ea.Name) { - return errEaNameTooLarge - } - if int(uint16(len(ea.Value))) != len(ea.Value) { - return errEaValueTooLarge - } - entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) - withPadding := (entrySize + 3) &^ 3 - nextOffset := uint32(0) - if !last { - nextOffset = withPadding - } - info := fileFullEaInformation{ - NextEntryOffset: nextOffset, - Flags: ea.Flags, - NameLength: uint8(len(ea.Name)), - ValueLength: uint16(len(ea.Value)), - } - - err := binary.Write(buf, binary.LittleEndian, &info) - if err != nil { - return err - } - - _, err = buf.Write([]byte(ea.Name)) - if err != nil { - return err - } - - err = buf.WriteByte(0) - if err != nil { - return err - } - - _, err = buf.Write(ea.Value) - if err != nil { - return err - } - - _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) - if err != nil { - return err - } - - return nil -} - -// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION -// buffer for use with BackupWrite, ZwSetEaFile, etc. 
-func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { - var buf bytes.Buffer - for i := range eas { - last := false - if i == len(eas)-1 { - last = true - } - - err := writeEa(&buf, &eas[i], last) - if err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go deleted file mode 100644 index 175a99d3f4..0000000000 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ /dev/null @@ -1,331 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "errors" - "io" - "runtime" - "sync" - "sync/atomic" - "syscall" - "time" - - "golang.org/x/sys/windows" -) - -//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult - -type atomicBool int32 - -func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } -func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } -func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } - -//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg -func (b *atomicBool) swap(new bool) bool { - var newInt int32 - if new { - newInt = 1 - } - return atomic.SwapInt32((*int32)(b), newInt) == 1 -} - -var ( - ErrFileClosed = errors.New("file has already been closed") - ErrTimeout = &timeoutError{} -) - -type timeoutError struct{} - -func (*timeoutError) Error() string { return "i/o timeout" } -func (*timeoutError) Timeout() bool { return true } -func (*timeoutError) Temporary() bool { return true } - -type timeoutChan chan struct{} - -var ioInitOnce sync.Once -var ioCompletionPort syscall.Handle - -// ioResult contains the result of an asynchronous IO operation. -type ioResult struct { - bytes uint32 - err error -} - -// ioOperation represents an outstanding asynchronous Win32 IO. -type ioOperation struct { - o syscall.Overlapped - ch chan ioResult -} - -func initIO() { - h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) - if err != nil { - panic(err) - } - ioCompletionPort = h - go ioCompletionProcessor(h) -} - -// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. -// It takes ownership of this handle and will close it if it is garbage collected. -type win32File struct { - handle syscall.Handle - wg sync.WaitGroup - wgLock sync.RWMutex - closing atomicBool - socket bool - readDeadline deadlineHandler - writeDeadline deadlineHandler -} - -type deadlineHandler struct { - setLock sync.Mutex - channel timeoutChan - channelLock sync.RWMutex - timer *time.Timer - timedout atomicBool -} - -// makeWin32File makes a new win32File from an existing file handle. 
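`EncodeExtendedAttributes` and `DecodeExtendedAttributes` are symmetric, so a round trip through the FILE_FULL_EA_INFORMATION layout makes a reasonable smoke test. A sketch using the exported helpers; the attribute names are made up:

```go
//go:build windows

package main

import (
	"fmt"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	in := []winio.ExtendedAttribute{
		{Name: "user.comment", Value: []byte("hello")},
		{Name: "user.binary", Value: []byte{1, 2, 3}},
	}
	// Encode into the padded FILE_FULL_EA_INFORMATION wire format.
	buf, err := winio.EncodeExtendedAttributes(in)
	if err != nil {
		panic(err)
	}
	// Decode it back; names, values, and flags should round-trip.
	out, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		panic(err)
	}
	for _, ea := range out {
		fmt.Printf("%s = %v\n", ea.Name, ea.Value)
	}
}
```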
-func makeWin32File(h syscall.Handle) (*win32File, error) { - f := &win32File{handle: h} - ioInitOnce.Do(initIO) - _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) - if err != nil { - return nil, err - } - err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE) - if err != nil { - return nil, err - } - f.readDeadline.channel = make(timeoutChan) - f.writeDeadline.channel = make(timeoutChan) - return f, nil -} - -func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - // If we return the result of makeWin32File directly, it can result in an - // interface-wrapped nil, rather than a nil interface value. - f, err := makeWin32File(h) - if err != nil { - return nil, err - } - return f, nil -} - -// closeHandle closes the resources associated with a Win32 handle. -func (f *win32File) closeHandle() { - f.wgLock.Lock() - // Atomically set that we are closing, releasing the resources only once. - if !f.closing.swap(true) { - f.wgLock.Unlock() - // cancel all IO and wait for it to complete - _ = cancelIoEx(f.handle, nil) - f.wg.Wait() - // at this point, no new IO can start - syscall.Close(f.handle) - f.handle = 0 - } else { - f.wgLock.Unlock() - } -} - -// Close closes a win32File. -func (f *win32File) Close() error { - f.closeHandle() - return nil -} - -// IsClosed checks if the file has been closed. -func (f *win32File) IsClosed() bool { - return f.closing.isSet() -} - -// prepareIO prepares for a new IO operation. -// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. -func (f *win32File) prepareIO() (*ioOperation, error) { - f.wgLock.RLock() - if f.closing.isSet() { - f.wgLock.RUnlock() - return nil, ErrFileClosed - } - f.wg.Add(1) - f.wgLock.RUnlock() - c := &ioOperation{} - c.ch = make(chan ioResult) - return c, nil -} - -// ioCompletionProcessor processes completed async IOs forever. -func ioCompletionProcessor(h syscall.Handle) { - for { - var bytes uint32 - var key uintptr - var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) - if op == nil { - panic(err) - } - op.ch <- ioResult{bytes, err} - } -} - -// todo: helsaawy - create an asyncIO version that takes a context - -// asyncIO processes the return value from ReadFile or WriteFile, blocking until -// the operation has actually completed. -func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno - return int(bytes), err - } - - if f.closing.isSet() { - _ = cancelIoEx(f.handle, &c.o) - } - - var timeout timeoutChan - if d != nil { - d.channelLock.Lock() - timeout = d.channel - d.channelLock.Unlock() - } - - var r ioResult - select { - case r = <-c.ch: - err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno - if f.closing.isSet() { - err = ErrFileClosed - } - } else if err != nil && f.socket { - // err is from Win32. Query the overlapped structure to get the winsock error. 
- var bytes, flags uint32 - err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) - } - case <-timeout: - _ = cancelIoEx(f.handle, &c.o) - r = <-c.ch - err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno - err = ErrTimeout - } - } - - // runtime.KeepAlive is needed, as c is passed via native - // code to ioCompletionProcessor, c must remain alive - // until the channel read is complete. - // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive? - runtime.KeepAlive(c) - return int(r.bytes), err -} - -// Read reads from a file handle. -func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIO() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.readDeadline.timedout.isSet() { - return 0, ErrTimeout - } - - var bytes uint32 - err = syscall.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIO(c, &f.readDeadline, bytes, err) - runtime.KeepAlive(b) - - // Handle EOF conditions. - if err == nil && n == 0 && len(b) != 0 { - return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno - return 0, io.EOF - } else { - return n, err - } -} - -// Write writes to a file handle. -func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIO() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.writeDeadline.timedout.isSet() { - return 0, ErrTimeout - } - - var bytes uint32 - err = syscall.WriteFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) - runtime.KeepAlive(b) - return n, err -} - -func (f *win32File) SetReadDeadline(deadline time.Time) error { - return f.readDeadline.set(deadline) -} - -func (f *win32File) SetWriteDeadline(deadline time.Time) error { - return f.writeDeadline.set(deadline) -} - -func (f *win32File) Flush() error { - return syscall.FlushFileBuffers(f.handle) -} - -func (f *win32File) Fd() uintptr { - return uintptr(f.handle) -} - -func (d *deadlineHandler) set(deadline time.Time) error { - d.setLock.Lock() - defer d.setLock.Unlock() - - if d.timer != nil { - if !d.timer.Stop() { - <-d.channel - } - d.timer = nil - } - d.timedout.setFalse() - - select { - case <-d.channel: - d.channelLock.Lock() - d.channel = make(chan struct{}) - d.channelLock.Unlock() - default: - } - - if deadline.IsZero() { - return nil - } - - timeoutIO := func() { - d.timedout.setTrue() - close(d.channel) - } - - now := time.Now() - duration := deadline.Sub(now) - if deadline.After(now) { - // Deadline is in the future, set a timer to wait - d.timer = time.AfterFunc(duration, timeoutIO) - } else { - // Deadline is in the past. Cancel all pending IO now. - timeoutIO() - } - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go deleted file mode 100644 index 702950e72a..0000000000 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ /dev/null @@ -1,92 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "os" - "runtime" - "unsafe" - - "golang.org/x/sys/windows" -) - -// FileBasicInfo contains file access time and file attributes information. -type FileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime - FileAttributes uint32 - _ uint32 // padding -} - -// GetFileBasicInfo retrieves times and attributes for a file. 
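The `deadlineHandler` logic above boils down to a small pattern: a timer closes a channel, and every in-flight operation selects on that channel to observe the timeout. A self-contained sketch of just that pattern (not the winio implementation itself):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The "deadline": a timer that closes the channel when it fires.
	timeout := make(chan struct{})
	time.AfterFunc(50*time.Millisecond, func() { close(timeout) })

	work := make(chan string) // never delivers, forcing the timeout path
	select {
	case r := <-work:
		fmt.Println("result:", r)
	case <-timeout:
		fmt.Println("i/o timeout") // what win32File surfaces as ErrTimeout
	}
}
```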
-func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
-	bi := &FileBasicInfo{}
-	if err := windows.GetFileInformationByHandleEx(
-		windows.Handle(f.Fd()),
-		windows.FileBasicInfo,
-		(*byte)(unsafe.Pointer(bi)),
-		uint32(unsafe.Sizeof(*bi)),
-	); err != nil {
-		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
-	}
-	runtime.KeepAlive(f)
-	return bi, nil
-}
-
-// SetFileBasicInfo sets times and attributes for a file.
-func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
-	if err := windows.SetFileInformationByHandle(
-		windows.Handle(f.Fd()),
-		windows.FileBasicInfo,
-		(*byte)(unsafe.Pointer(bi)),
-		uint32(unsafe.Sizeof(*bi)),
-	); err != nil {
-		return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
-	}
-	runtime.KeepAlive(f)
-	return nil
-}
-
-// FileStandardInfo contains extended information for the file.
-// FILE_STANDARD_INFO in WinBase.h
-// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
-type FileStandardInfo struct {
-	AllocationSize, EndOfFile int64
-	NumberOfLinks             uint32
-	DeletePending, Directory  bool
-}
-
-// GetFileStandardInfo retrieves extended information for the file.
-func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
-	si := &FileStandardInfo{}
-	if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
-		windows.FileStandardInfo,
-		(*byte)(unsafe.Pointer(si)),
-		uint32(unsafe.Sizeof(*si))); err != nil {
-		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
-	}
-	runtime.KeepAlive(f)
-	return si, nil
-}
-
-// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
-// unique on a system.
-type FileIDInfo struct {
-	VolumeSerialNumber uint64
-	FileID             [16]byte
-}
-
-// GetFileID retrieves the unique (volume, file ID) pair for a file.
-func GetFileID(f *os.File) (*FileIDInfo, error) {
-	fileID := &FileIDInfo{}
-	if err := windows.GetFileInformationByHandleEx(
-		windows.Handle(f.Fd()),
-		windows.FileIdInfo,
-		(*byte)(unsafe.Pointer(fileID)),
-		uint32(unsafe.Sizeof(*fileID)),
-	); err != nil {
-		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
-	}
-	runtime.KeepAlive(f)
-	return fileID, nil
-}
diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go deleted file mode 100644 index c881916583..0000000000 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ /dev/null @@ -1,575 +0,0 @@
-//go:build windows
-// +build windows
-
-package winio
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"net"
-	"os"
-	"syscall"
-	"time"
-	"unsafe"
-
-	"golang.org/x/sys/windows"
-
-	"github.com/Microsoft/go-winio/internal/socket"
-	"github.com/Microsoft/go-winio/pkg/guid"
-)
-
-const afHVSock = 34 // AF_HYPERV
-
-// Well known Service and VM IDs
-// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
-
-// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
-func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
-	return guid.GUID{}
-}
-
-// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
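`GetFileBasicInfo` and `SetFileBasicInfo` above pair naturally for copying timestamps and attributes between handles. A Windows-only sketch with hypothetical paths:

```go
//go:build windows

package main

import (
	"os"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	src, err := os.Open(`C:\src.txt`) // hypothetical paths
	if err != nil {
		panic(err)
	}
	defer src.Close()

	dst, err := os.OpenFile(`C:\dst.txt`, os.O_RDWR, 0)
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	bi, err := winio.GetFileBasicInfo(src)
	if err != nil {
		panic(err)
	}
	// Copies creation/access/write/change times and attributes to dst.
	if err := winio.SetFileBasicInfo(dst, bi); err != nil {
		panic(err)
	}
}
```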
-func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff - return guid.GUID{ - Data1: 0xffffffff, - Data2: 0xffff, - Data3: 0xffff, - Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - } -} - -// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector. -func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838 - return guid.GUID{ - Data1: 0xe0e16197, - Data2: 0xdd56, - Data3: 0x4a10, - Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38}, - } -} - -// HvsockGUIDSiloHost is the address of a silo's host partition: -// - The silo host of a hosted silo is the utility VM. -// - The silo host of a silo on a physical host is the physical host. -func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568 - return guid.GUID{ - Data1: 0x36bd0c5c, - Data2: 0x7276, - Data3: 0x4223, - Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68}, - } -} - -// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions. -func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd - return guid.GUID{ - Data1: 0x90db8b89, - Data2: 0xd35, - Data3: 0x4f79, - Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd}, - } -} - -// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition. -// Listening on this VmId accepts connection from: -// - Inside silos: silo host partition. -// - Inside hosted silo: host of the VM. -// - Inside VM: VM host. -// - Physical host: Not supported. -func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878 - return guid.GUID{ - Data1: 0xa42e7cda, - Data2: 0xd03f, - Data3: 0x480c, - Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78}, - } -} - -// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol. -func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3 - return guid.GUID{ - Data2: 0xfacb, - Data3: 0x11e6, - Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3}, - } -} - -// An HvsockAddr is an address for a AF_HYPERV socket. -type HvsockAddr struct { - VMID guid.GUID - ServiceID guid.GUID -} - -type rawHvsockAddr struct { - Family uint16 - _ uint16 - VMID guid.GUID - ServiceID guid.GUID -} - -var _ socket.RawSockaddr = &rawHvsockAddr{} - -// Network returns the address's network name, "hvsock". -func (*HvsockAddr) Network() string { - return "hvsock" -} - -func (addr *HvsockAddr) String() string { - return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) -} - -// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. -func VsockServiceID(port uint32) guid.GUID { - g := hvsockVsockServiceTemplate() // make a copy - g.Data1 = port - return g -} - -func (addr *HvsockAddr) raw() rawHvsockAddr { - return rawHvsockAddr{ - Family: afHVSock, - VMID: addr.VMID, - ServiceID: addr.ServiceID, - } -} - -func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { - addr.VMID = raw.VMID - addr.ServiceID = raw.ServiceID -} - -// Sockaddr returns a pointer to and the size of this struct. -// -// Implements the [socket.RawSockaddr] interface, and allows use in -// [socket.Bind] and [socket.ConnectEx]. -func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) { - return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil -} - -// Sockaddr interface allows use with `sockets.Bind()` and `.ConnectEx()`. 
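The GUID helpers above compose into a complete AF_HYPERV address; `VsockServiceID` simply stamps the port into `Data1` of the VSOCK service template. A sketch for a hypothetical port 2222 on the loopback partition:

```go
//go:build windows

package main

import (
	"fmt"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	addr := winio.HvsockAddr{
		VMID: winio.HvsockGUIDLoopback(),
		// Port 2222 (0x8ae) yields 000008ae-facb-11e6-bd58-64006a7986d3.
		ServiceID: winio.VsockServiceID(2222),
	}
	fmt.Println(addr.Network(), addr.String()) // "hvsock <VMID>:<ServiceID>"
}
```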
-func (r *rawHvsockAddr) FromBytes(b []byte) error { - n := int(unsafe.Sizeof(rawHvsockAddr{})) - - if len(b) < n { - return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize) - } - - copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n]) - if r.Family != afHVSock { - return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily) - } - - return nil -} - -// HvsockListener is a socket listener for the AF_HYPERV address family. -type HvsockListener struct { - sock *win32File - addr HvsockAddr -} - -var _ net.Listener = &HvsockListener{} - -// HvsockConn is a connected socket of the AF_HYPERV address family. -type HvsockConn struct { - sock *win32File - local, remote HvsockAddr -} - -var _ net.Conn = &HvsockConn{} - -func newHVSocket() (*win32File, error) { - fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1) - if err != nil { - return nil, os.NewSyscallError("socket", err) - } - f, err := makeWin32File(fd) - if err != nil { - syscall.Close(fd) - return nil, err - } - f.socket = true - return f, nil -} - -// ListenHvsock listens for connections on the specified hvsock address. -func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { - l := &HvsockListener{addr: *addr} - sock, err := newHVSocket() - if err != nil { - return nil, l.opErr("listen", err) - } - sa := addr.raw() - err = socket.Bind(windows.Handle(sock.handle), &sa) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("socket", err)) - } - err = syscall.Listen(sock.handle, 16) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("listen", err)) - } - return &HvsockListener{sock: sock, addr: *addr}, nil -} - -func (l *HvsockListener) opErr(op string, err error) error { - return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} -} - -// Addr returns the listener's network address. -func (l *HvsockListener) Addr() net.Addr { - return &l.addr -} - -// Accept waits for the next connection and returns it. -func (l *HvsockListener) Accept() (_ net.Conn, err error) { - sock, err := newHVSocket() - if err != nil { - return nil, l.opErr("accept", err) - } - defer func() { - if sock != nil { - sock.Close() - } - }() - c, err := l.sock.prepareIO() - if err != nil { - return nil, l.opErr("accept", err) - } - defer l.sock.wg.Done() - - // AcceptEx, per documentation, requires an extra 16 bytes per address. - // - // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex - const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) - var addrbuf [addrlen * 2]byte - - var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) - if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { - return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) - } - - conn := &HvsockConn{ - sock: sock, - } - // The local address returned in the AcceptEx buffer is the same as the Listener socket's - // address. However, the service GUID reported by GetSockName is different from the Listeners - // socket, and is sometimes the same as the local address of the socket that dialed the - // address, with the service GUID.Data1 incremented, but othertimes is different. - // todo: does the local address matter? is the listener's address or the actual address appropriate? 
- conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) - conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) - - // initialize the accepted socket and update its properties with those of the listening socket - if err = windows.Setsockopt(windows.Handle(sock.handle), - windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, - (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { - return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) - } - - sock = nil - return conn, nil -} - -// Close closes the listener, causing any pending Accept calls to fail. -func (l *HvsockListener) Close() error { - return l.sock.Close() -} - -// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]). -type HvsockDialer struct { - // Deadline is the time the Dial operation must connect before erroring. - Deadline time.Time - - // Retries is the number of additional connects to try if the connection times out, is refused, - // or the host is unreachable - Retries uint - - // RetryWait is the time to wait after a connection error to retry - RetryWait time.Duration - - rt *time.Timer // redial wait timer -} - -// Dial the Hyper-V socket at addr. -// -// See [HvsockDialer.Dial] for more information. -func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { - return (&HvsockDialer{}).Dial(ctx, addr) -} - -// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful. -// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between -// retries. -// -// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx. -func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { - op := "dial" - // create the conn early to use opErr() - conn = &HvsockConn{ - remote: *addr, - } - - if !d.Deadline.IsZero() { - var cancel context.CancelFunc - ctx, cancel = context.WithDeadline(ctx, d.Deadline) - defer cancel() - } - - // preemptive timeout/cancellation check - if err = ctx.Err(); err != nil { - return nil, conn.opErr(op, err) - } - - sock, err := newHVSocket() - if err != nil { - return nil, conn.opErr(op, err) - } - defer func() { - if sock != nil { - sock.Close() - } - }() - - sa := addr.raw() - err = socket.Bind(windows.Handle(sock.handle), &sa) - if err != nil { - return nil, conn.opErr(op, os.NewSyscallError("bind", err)) - } - - c, err := sock.prepareIO() - if err != nil { - return nil, conn.opErr(op, err) - } - defer sock.wg.Done() - var bytes uint32 - for i := uint(0); i <= d.Retries; i++ { - err = socket.ConnectEx( - windows.Handle(sock.handle), - &sa, - nil, // sendBuf - 0, // sendDataLen - &bytes, - (*windows.Overlapped)(unsafe.Pointer(&c.o))) - _, err = sock.asyncIO(c, nil, bytes, err) - if i < d.Retries && canRedial(err) { - if err = d.redialWait(ctx); err == nil { - continue - } - } - break - } - if err != nil { - return nil, conn.opErr(op, os.NewSyscallError("connectex", err)) - } - - // update the connection properties, so shutdown can be used - if err = windows.Setsockopt( - windows.Handle(sock.handle), - windows.SOL_SOCKET, - windows.SO_UPDATE_CONNECT_CONTEXT, - nil, // optvalue - 0, // optlen - ); err != nil { - return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err)) - } - - // get the local name - var sal rawHvsockAddr - err = socket.GetSockName(windows.Handle(sock.handle), &sal) - if err != nil { - return nil, conn.opErr(op, 
os.NewSyscallError("getsockname", err)) - } - conn.local.fromRaw(&sal) - - // one last check for timeout, since asyncIO doesn't check the context - if err = ctx.Err(); err != nil { - return nil, conn.opErr(op, err) - } - - conn.sock = sock - sock = nil - - return conn, nil -} - -// redialWait waits before attempting to redial, resetting the timer as appropriate. -func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { - if d.RetryWait == 0 { - return nil - } - - if d.rt == nil { - d.rt = time.NewTimer(d.RetryWait) - } else { - // should already be stopped and drained - d.rt.Reset(d.RetryWait) - } - - select { - case <-ctx.Done(): - case <-d.rt.C: - return nil - } - - // stop and drain the timer - if !d.rt.Stop() { - <-d.rt.C - } - return ctx.Err() -} - -// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall. -func canRedial(err error) bool { - //nolint:errorlint // guaranteed to be an Errno - switch err { - case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT, - windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL: - return true - default: - return false - } -} - -func (conn *HvsockConn) opErr(op string, err error) error { - // translate from "file closed" to "socket closed" - if errors.Is(err, ErrFileClosed) { - err = socket.ErrSocketClosed - } - return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} -} - -func (conn *HvsockConn) Read(b []byte) (int, error) { - c, err := conn.sock.prepareIO() - if err != nil { - return 0, conn.opErr("read", err) - } - defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var flags, bytes uint32 - err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) - n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) - if err != nil { - var eno windows.Errno - if errors.As(err, &eno) { - err = os.NewSyscallError("wsarecv", eno) - } - return 0, conn.opErr("read", err) - } else if n == 0 { - err = io.EOF - } - return n, err -} - -func (conn *HvsockConn) Write(b []byte) (int, error) { - t := 0 - for len(b) != 0 { - n, err := conn.write(b) - if err != nil { - return t + n, err - } - t += n - b = b[n:] - } - return t, nil -} - -func (conn *HvsockConn) write(b []byte) (int, error) { - c, err := conn.sock.prepareIO() - if err != nil { - return 0, conn.opErr("write", err) - } - defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var bytes uint32 - err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) - n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) - if err != nil { - var eno windows.Errno - if errors.As(err, &eno) { - err = os.NewSyscallError("wsasend", eno) - } - return 0, conn.opErr("write", err) - } - return n, err -} - -// Close closes the socket connection, failing any pending read or write calls. -func (conn *HvsockConn) Close() error { - return conn.sock.Close() -} - -func (conn *HvsockConn) IsClosed() bool { - return conn.sock.IsClosed() -} - -// shutdown disables sending or receiving on a socket. 
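The hvsock client surface removed above (Dial, HvsockDialer, and the HvsockConn read/write paths) was typically consumed as in the following minimal, Windows-only sketch. This is illustrative, not part of the diff: the VM ID is a hypothetical placeholder, and VsockServiceID (a helper defined earlier in the same deleted file) maps a vsock port into the Hyper-V service GUID namespace. The deleted shutdown helper continues after this aside.

//go:build windows

package main

import (
	"context"
	"log"
	"time"

	winio "github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// Hypothetical target VM ID; real callers obtain this from HCS/vmcompute.
	vmID, err := guid.FromString("c9a2e3b0-47e4-4cd1-97d2-a3b0e4c90101")
	if err != nil {
		log.Fatal(err)
	}

	addr := &winio.HvsockAddr{
		VMID:      vmID,
		ServiceID: winio.VsockServiceID(5000), // vsock port 5000
	}

	// Retry a few times in case the guest service is still starting up.
	d := &winio.HvsockDialer{Retries: 3, RetryWait: time.Second}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn, err := d.Dial(ctx, addr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if _, err := conn.Write([]byte("ping\n")); err != nil {
		log.Fatal(err)
	}
}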
-func (conn *HvsockConn) shutdown(how int) error { - if conn.IsClosed() { - return socket.ErrSocketClosed - } - - err := syscall.Shutdown(conn.sock.handle, how) - if err != nil { - // If the connection was closed, shutdowns fail with "not connected" - if errors.Is(err, windows.WSAENOTCONN) || - errors.Is(err, windows.WSAESHUTDOWN) { - err = socket.ErrSocketClosed - } - return os.NewSyscallError("shutdown", err) - } - return nil -} - -// CloseRead shuts down the read end of the socket, preventing future read operations. -func (conn *HvsockConn) CloseRead() error { - err := conn.shutdown(syscall.SHUT_RD) - if err != nil { - return conn.opErr("closeread", err) - } - return nil -} - -// CloseWrite shuts down the write end of the socket, preventing future write operations and -// notifying the other endpoint that no more data will be written. -func (conn *HvsockConn) CloseWrite() error { - err := conn.shutdown(syscall.SHUT_WR) - if err != nil { - return conn.opErr("closewrite", err) - } - return nil -} - -// LocalAddr returns the local address of the connection. -func (conn *HvsockConn) LocalAddr() net.Addr { - return &conn.local -} - -// RemoteAddr returns the remote address of the connection. -func (conn *HvsockConn) RemoteAddr() net.Addr { - return &conn.remote -} - -// SetDeadline implements the net.Conn SetDeadline method. -func (conn *HvsockConn) SetDeadline(t time.Time) error { - // todo: implement `SetDeadline` for `win32File` - if err := conn.SetReadDeadline(t); err != nil { - return fmt.Errorf("set read deadline: %w", err) - } - if err := conn.SetWriteDeadline(t); err != nil { - return fmt.Errorf("set write deadline: %w", err) - } - return nil -} - -// SetReadDeadline implements the net.Conn SetReadDeadline method. -func (conn *HvsockConn) SetReadDeadline(t time.Time) error { - return conn.sock.SetReadDeadline(t) -} - -// SetWriteDeadline implements the net.Conn SetWriteDeadline method. -func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { - return conn.sock.SetWriteDeadline(t) -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go deleted file mode 100644 index 1f65388178..0000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// This package contains Win32 filesystem functionality. -package fs diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go deleted file mode 100644 index 509b3ec641..0000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go +++ /dev/null @@ -1,202 +0,0 @@ -//go:build windows - -package fs - -import ( - "golang.org/x/sys/windows" - - "github.com/Microsoft/go-winio/internal/stringbuffer" -) - -//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go - -// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew -//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW - -const NullHandle windows.Handle = 0 - -// AccessMask defines standard, specific, and generic rights. 
-//
-// Bitmask:
-//	 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
-//	 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
-//	+---------------+---------------+-------------------------------+
-//	|G|G|G|G|Resvd|A| StandardRights|         SpecificRights        |
-//	|R|W|E|A|     |S|               |                               |
-//	+-+-------------+---------------+-------------------------------+
-//
-// GR	Generic Read
-// GW	Generic Write
-// GE	Generic Execute
-// GA	Generic All
-// Resvd	Reserved
-// AS	Access Security System
-//
-// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask
-//
-// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights
-//
-// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants
-type AccessMask = windows.ACCESS_MASK
-
-//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
-const (
-	// Not actually any.
-	//
-	// For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device"
-	// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
-	FILE_ANY_ACCESS AccessMask = 0
-
-	// Specific Object Access
-	// from ntioapi.h
-
-	FILE_READ_DATA      AccessMask = (0x0001) // file & pipe
-	FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory
-
-	FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe
-	FILE_ADD_FILE   AccessMask = (0x0002) // directory
-
-	FILE_APPEND_DATA          AccessMask = (0x0004) // file
-	FILE_ADD_SUBDIRECTORY     AccessMask = (0x0004) // directory
-	FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe
-
-	FILE_READ_EA         AccessMask = (0x0008) // file & directory
-	FILE_READ_PROPERTIES AccessMask = FILE_READ_EA
-
-	FILE_WRITE_EA         AccessMask = (0x0010) // file & directory
-	FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA
-
-	FILE_EXECUTE  AccessMask = (0x0020) // file
-	FILE_TRAVERSE AccessMask = (0x0020) // directory
-
-	FILE_DELETE_CHILD AccessMask = (0x0040) // directory
-
-	FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all
-
-	FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all
-
-	FILE_ALL_ACCESS      AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF)
-	FILE_GENERIC_READ    AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE)
-	FILE_GENERIC_WRITE   AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
-	FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE)
-
-	SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF
-
-	// Standard Access
-	// from ntseapi.h
-
-	DELETE       AccessMask = 0x0001_0000
-	READ_CONTROL AccessMask = 0x0002_0000
-	WRITE_DAC    AccessMask = 0x0004_0000
-	WRITE_OWNER  AccessMask = 0x0008_0000
-	SYNCHRONIZE  AccessMask = 0x0010_0000
-
-	STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000
-
-	STANDARD_RIGHTS_READ    AccessMask = READ_CONTROL
-	STANDARD_RIGHTS_WRITE   AccessMask = READ_CONTROL
-	STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL
-
-	STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000
-)
-
-type FileShareMode uint32
-
-//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
-const ( - FILE_SHARE_NONE FileShareMode = 0x00 - FILE_SHARE_READ FileShareMode = 0x01 - FILE_SHARE_WRITE FileShareMode = 0x02 - FILE_SHARE_DELETE FileShareMode = 0x04 - FILE_SHARE_VALID_FLAGS FileShareMode = 0x07 -) - -type FileCreationDisposition uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // from winbase.h - - CREATE_NEW FileCreationDisposition = 0x01 - CREATE_ALWAYS FileCreationDisposition = 0x02 - OPEN_EXISTING FileCreationDisposition = 0x03 - OPEN_ALWAYS FileCreationDisposition = 0x04 - TRUNCATE_EXISTING FileCreationDisposition = 0x05 -) - -// CreateFile and co. take flags or attributes together as one parameter. -// Define alias until we can use generics to allow both - -// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants -type FileFlagOrAttribute uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( // from winnt.h - FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 - FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 - FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 - FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000 - FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000 - FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000 - FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000 - FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000 - FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000 - FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000 - FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 -) - -type FileSQSFlag = FileFlagOrAttribute - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( // from winbase.h - SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) - SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) - SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) - SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) - - SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000 - SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000 -) - -// GetFinalPathNameByHandle flags -// -// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters -type GetFinalPathFlag uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - GetFinalPathDefaultFlag GetFinalPathFlag = 0x0 - - FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0 - FILE_NAME_OPENED GetFinalPathFlag = 0x8 - - VOLUME_NAME_DOS GetFinalPathFlag = 0x0 - VOLUME_NAME_GUID GetFinalPathFlag = 0x1 - VOLUME_NAME_NT GetFinalPathFlag = 0x2 - VOLUME_NAME_NONE GetFinalPathFlag = 0x4 -) - -// getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle -// with the given handle and flags. It transparently takes care of creating a buffer of the -// correct size for the call. -// -// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew -func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) { - b := stringbuffer.NewWString() - //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n? 
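The fs constants above compose directly into the package's CreateFile wrapper (declared near the top of the deleted fs.go). The following is an illustrative sketch, not part of the diff, written as if inside the internal fs package itself, since internal packages cannot be imported from outside go-winio; the helper name is hypothetical. The buffer-growth loop that completes GetFinalPathNameByHandle follows below.

//go:build windows

package fs // illustrative: internal/fs is only importable from within go-winio

import "golang.org/x/sys/windows"

// openForOverlappedRead is a hypothetical helper showing how the deleted
// constants combine: generic read access, full sharing, and the overlapped
// flag that go-winio's asynchronous I/O layer requires.
func openForOverlappedRead(path string) (windows.Handle, error) {
	return CreateFile(
		path,
		FILE_GENERIC_READ, // AccessMask
		FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, // FileShareMode
		nil,                  // *syscall.SecurityAttributes
		OPEN_EXISTING,        // FileCreationDisposition
		FILE_FLAG_OVERLAPPED, // FileFlagOrAttribute
		NullHandle,           // no template file
	)
}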
-	for {
-		n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags))
-		if err != nil {
-			return "", err
-		}
-		// If the buffer wasn't large enough, n will be the total size needed (including null terminator).
-		// Resize and try again.
-		if n > b.Cap() {
-			b.ResizeTo(n)
-			continue
-		}
-		// If the buffer is large enough, n will be the size not including the null terminator.
-		// Convert to a Go string and return.
-		return b.String(), nil
-	}
-}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go
deleted file mode 100644
index 81760ac67e..0000000000
--- a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package fs
-
-// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level
-type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32`
-
-// Impersonation levels
-const (
-	SecurityAnonymous      SecurityImpersonationLevel = 0
-	SecurityIdentification SecurityImpersonationLevel = 1
-	SecurityImpersonation  SecurityImpersonationLevel = 2
-	SecurityDelegation     SecurityImpersonationLevel = 3
-)
diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
deleted file mode 100644
index e2f7bb24e5..0000000000
--- a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
+++ /dev/null
@@ -1,64 +0,0 @@
-//go:build windows
-
-// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
-
-package fs
-
-import (
-	"syscall"
-	"unsafe"
-
-	"golang.org/x/sys/windows"
-)
-
-var _ unsafe.Pointer
-
-// Do the interface allocations only once for common
-// Errno values.
-const (
-	errnoERROR_IO_PENDING = 997
-)
-
-var (
-	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
-	errERROR_EINVAL     error = syscall.EINVAL
-)
-
-// errnoErr returns common boxed Errno values, to prevent
-// allocations at runtime.
-func errnoErr(e syscall.Errno) error {
-	switch e {
-	case 0:
-		return errERROR_EINVAL
-	case errnoERROR_IO_PENDING:
-		return errERROR_IO_PENDING
-	}
-	// TODO: add more here, after collecting data on the common
-	// error values seen on Windows. (perhaps when running
-	// all.bat?)
-	return e
-}
-
-var (
-	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
-
-	procCreateFileW = modkernel32.NewProc("CreateFileW")
-)
-
-func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
-	var _p0 *uint16
-	_p0, err = syscall.UTF16PtrFromString(name)
-	if err != nil {
-		return
-	}
-	return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile)
-}
-
-func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
-	r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
-	handle = windows.Handle(r0)
-	if handle == windows.InvalidHandle {
-		err = errnoErr(e1)
-	}
-	return
-}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
deleted file mode 100644
index 7e82f9afa9..0000000000
--- a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package socket
-
-import (
-	"unsafe"
-)
-
-// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The
-// struct must meet the Win32 sockaddr requirements specified here:
-// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2
-//
-// Specifically, the struct size must be at least larger than an int16 (unsigned short)
-// for the address family.
-type RawSockaddr interface {
-	// Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing
-	// for the RawSockaddr's data to be overwritten by syscalls (if necessary).
-	//
-	// It is the caller's responsibility to validate that the values are valid; invalid
-	// pointers or size can cause a panic.
-	Sockaddr() (unsafe.Pointer, int32, error)
-}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
deleted file mode 100644
index aeb7b7250f..0000000000
--- a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
+++ /dev/null
@@ -1,179 +0,0 @@
-//go:build windows
-
-package socket
-
-import (
-	"errors"
-	"fmt"
-	"net"
-	"sync"
-	"syscall"
-	"unsafe"
-
-	"github.com/Microsoft/go-winio/pkg/guid"
-	"golang.org/x/sys/windows"
-)
-
-//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go
-
-//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname
-//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername
-//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
-
-const socketError = uintptr(^uint32(0))
-
-var (
-	// todo(helsaawy): create custom error types to store the desired vs actual size and addr family?
-
-	ErrBufferSize     = errors.New("buffer size")
-	ErrAddrFamily     = errors.New("address family")
-	ErrInvalidPointer = errors.New("invalid pointer")
-	ErrSocketClosed   = fmt.Errorf("socket closed: %w", net.ErrClosed)
-)
-
-// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error)
-
-// GetSockName writes the local address of socket s to the [RawSockaddr] rsa.
-// If rsa is not large enough, [windows.WSAEFAULT] is returned.
-func GetSockName(s windows.Handle, rsa RawSockaddr) error {
-	ptr, l, err := rsa.Sockaddr()
-	if err != nil {
-		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
-	}
-
-	// although getsockname returns WSAEFAULT if the buffer is too small, it does not set
-	// &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy
-	return getsockname(s, ptr, &l)
-}
-
-// GetPeerName returns the remote address the socket is connected to.
-//
-// See [GetSockName] for more information.
-func GetPeerName(s windows.Handle, rsa RawSockaddr) error {
-	ptr, l, err := rsa.Sockaddr()
-	if err != nil {
-		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
-	}
-
-	return getpeername(s, ptr, &l)
-}
-
-func Bind(s windows.Handle, rsa RawSockaddr) (err error) {
-	ptr, l, err := rsa.Sockaddr()
-	if err != nil {
-		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
-	}
-
-	return bind(s, ptr, l)
-}
-
-// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of
-// their sockaddr interface, so they cannot be used with HvsockAddr.
-// Replicate functionality here from
-// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go
-
-// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at
-// runtime via a WSAIoctl call:
-// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks
-
-type runtimeFunc struct {
-	id   guid.GUID
-	once sync.Once
-	addr uintptr
-	err  error
-}
-
-func (f *runtimeFunc) Load() error {
-	f.once.Do(func() {
-		var s windows.Handle
-		s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP)
-		if f.err != nil {
-			return
-		}
-		defer windows.CloseHandle(s) //nolint:errcheck
-
-		var n uint32
-		f.err = windows.WSAIoctl(s,
-			windows.SIO_GET_EXTENSION_FUNCTION_POINTER,
-			(*byte)(unsafe.Pointer(&f.id)),
-			uint32(unsafe.Sizeof(f.id)),
-			(*byte)(unsafe.Pointer(&f.addr)),
-			uint32(unsafe.Sizeof(f.addr)),
-			&n,
-			nil, // overlapped
-			0,   // completionRoutine
-		)
-	})
-	return f.err
-}
-
-var (
-	// todo: add `AcceptEx` and `GetAcceptExSockaddrs`
-	WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS
-		Data1: 0x25a207b9,
-		Data2: 0xddf3,
-		Data3: 0x4660,
-		Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
-	}
-
-	connectExFunc = runtimeFunc{id: WSAID_CONNECTEX}
-)
-
-func ConnectEx(
-	fd windows.Handle,
-	rsa RawSockaddr,
-	sendBuf *byte,
-	sendDataLen uint32,
-	bytesSent *uint32,
-	overlapped *windows.Overlapped,
-) error {
-	if err := connectExFunc.Load(); err != nil {
-		return fmt.Errorf("failed to load ConnectEx function pointer: %w", err)
-	}
-	ptr, n, err := rsa.Sockaddr()
-	if err != nil {
-		return err
-	}
-	return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped)
-}
-
-// BOOL LpfnConnectex(
-//	[in]           SOCKET s,
-//	[in]           const sockaddr *name,
-//	[in]           int namelen,
-//	[in, optional] PVOID lpSendBuffer,
-//	[in]           DWORD dwSendDataLength,
-//	[out]          LPDWORD lpdwBytesSent,
-//	[in]           LPOVERLAPPED lpOverlapped
-// )
-
-func connectEx(
-	s windows.Handle,
-	name unsafe.Pointer,
-	namelen int32,
-	sendBuf *byte,
-	sendDataLen uint32,
-	bytesSent *uint32,
-	overlapped *windows.Overlapped,
-) (err error) {
-	// todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN
-	r1, _, e1 := syscall.Syscall9(connectExFunc.addr,
-		7,
-		uintptr(s),
-		uintptr(name),
-		uintptr(namelen),
-		uintptr(unsafe.Pointer(sendBuf)),
-		uintptr(sendDataLen),
-		uintptr(unsafe.Pointer(bytesSent)),
-		uintptr(unsafe.Pointer(overlapped)),
-		0,
-		0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = error(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return err
-}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
deleted file mode 100644
index 6d2e1a9e44..0000000000
--- a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
+++ /dev/null
@@ -1,72 +0,0 @@
-//go:build windows
-
-// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
-
-package socket
-
-import (
-	"syscall"
-	"unsafe"
-
-	"golang.org/x/sys/windows"
-)
-
-var _ unsafe.Pointer
-
-// Do the interface allocations only once for common
-// Errno values.
-const (
-	errnoERROR_IO_PENDING = 997
-)
-
-var (
-	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
-	errERROR_EINVAL     error = syscall.EINVAL
-)
-
-// errnoErr returns common boxed Errno values, to prevent
-// allocations at runtime.
-func errnoErr(e syscall.Errno) error {
-	switch e {
-	case 0:
-		return errERROR_EINVAL
-	case errnoERROR_IO_PENDING:
-		return errERROR_IO_PENDING
-	}
-	// TODO: add more here, after collecting data on the common
-	// error values seen on Windows. (perhaps when running
-	// all.bat?)
-	return e
-}
-
-var (
-	modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
-
-	procbind        = modws2_32.NewProc("bind")
-	procgetpeername = modws2_32.NewProc("getpeername")
-	procgetsockname = modws2_32.NewProc("getsockname")
-)
-
-func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
-	if r1 == socketError {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
-	if r1 == socketError {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
-	if r1 == socketError {
-		err = errnoErr(e1)
-	}
-	return
-}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
deleted file mode 100644
index 7ad5057024..0000000000
--- a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package stringbuffer
-
-import (
-	"sync"
-	"unicode/utf16"
-)
-
-// TODO: worth exporting and using in mkwinsyscall?
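The Bind, ConnectEx, and GetSockName helpers above all funnel through the RawSockaddr contract from rawaddr.go. The following is an illustrative sketch, not part of the diff, of a conforming implementation, using a hypothetical AF_UNIX-style type; the only hard requirement, per the deleted doc comment, is that the struct start with the 16-bit address family. The WString buffer type resumes below.

//go:build windows

package socket // illustrative: written as if inside the internal socket package

import "unsafe"

// rawUnixAddr is a hypothetical sockaddr mirroring SOCKADDR_UN: a uint16
// family followed by a fixed-size path buffer.
type rawUnixAddr struct {
	Family uint16
	Path   [108]byte
}

// Sockaddr hands Bind/ConnectEx/GetSockName a pointer and size they can
// pass straight to the Win32 calls, which may overwrite the contents.
func (r *rawUnixAddr) Sockaddr() (unsafe.Pointer, int32, error) {
	return unsafe.Pointer(r), int32(unsafe.Sizeof(*r)), nil
}

var _ RawSockaddr = &rawUnixAddr{}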
-
-// MinWStringCap is the buffer size in the pool, chosen somewhat arbitrarily to accommodate
-// large path strings:
-// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310.
-const MinWStringCap = 310
-
-// use *[]uint16 since []uint16 creates an extra allocation where the slice header
-// is copied to heap and then referenced via pointer in the interface header that sync.Pool
-// stores.
-var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly
-	New: func() interface{} {
-		b := make([]uint16, MinWStringCap)
-		return &b
-	},
-}
-
-func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) }
-
-// freeBuffer copies the slice header data, and puts a pointer to that in the pool.
-// This avoids taking a pointer to the slice header in WString, which can be set to nil.
-func freeBuffer(b []uint16) { pathPool.Put(&b) }
-
-// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings
-// for interacting with Win32 APIs.
-// Sizes are specified as uint32 and not int.
-//
-// It is not thread safe.
-type WString struct {
-	// type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future.
-
-	// raw buffer
-	b []uint16
-}
-
-// NewWString returns a [WString] allocated from a shared pool with an
-// initial capacity of at least [MinWStringCap].
-// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
-//
-// The buffer should be freed via [WString.Free]
-func NewWString() *WString {
-	return &WString{
-		b: newBuffer(),
-	}
-}
-
-func (b *WString) Free() {
-	if b.empty() {
-		return
-	}
-	freeBuffer(b.b)
-	b.b = nil
-}
-
-// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
-// previous buffer back into pool.
-func (b *WString) ResizeTo(c uint32) uint32 {
-	// already sufficient (or c is 0)
-	if c <= b.Cap() {
-		return b.Cap()
-	}
-
-	if c <= MinWStringCap {
-		c = MinWStringCap
-	}
-	// allocate at-least double buffer size, as is done in [bytes.Buffer] and other places
-	if c <= 2*b.Cap() {
-		c = 2 * b.Cap()
-	}
-
-	b2 := make([]uint16, c)
-	if !b.empty() {
-		copy(b2, b.b)
-		freeBuffer(b.b)
-	}
-	b.b = b2
-	return c
-}
-
-// Buffer returns the underlying []uint16 buffer.
-func (b *WString) Buffer() []uint16 {
-	if b.empty() {
-		return nil
-	}
-	return b.b
-}
-
-// Pointer returns a pointer to the first uint16 in the buffer.
-// If the [WString.Free] has already been called, the pointer will be nil.
-func (b *WString) Pointer() *uint16 {
-	if b.empty() {
-		return nil
-	}
-	return &b.b[0]
-}
-
-// String returns the UTF-8 encoding of the UTF-16 string in the buffer.
-//
-// It assumes that the data is null-terminated.
-func (b *WString) String() string {
-	// Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows"
-	// and would make this code Windows-only, which makes no sense.
-	// So copy UTF16ToString code into here.
-	// If other windows-specific code is added, switch to [windows.UTF16ToString]
-
-	s := b.b
-	for i, v := range s {
-		if v == 0 {
-			s = s[:i]
-			break
-		}
-	}
-	return string(utf16.Decode(s))
-}
-
-// Cap returns the underlying buffer capacity.
-func (b *WString) Cap() uint32 { - if b.empty() { - return 0 - } - return b.cap() -} - -func (b *WString) cap() uint32 { return uint32(cap(b.b)) } -func (b *WString) empty() bool { return b == nil || b.cap() == 0 } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go deleted file mode 100644 index 25cc811031..0000000000 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ /dev/null @@ -1,525 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "os" - "runtime" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/windows" - - "github.com/Microsoft/go-winio/internal/fs" -) - -//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe -//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo -//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW -//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc -//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile -//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U -//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl - -type ioStatusBlock struct { - Status, Information uintptr -} - -type objectAttributes struct { - Length uintptr - RootDirectory uintptr - ObjectName *unicodeString - Attributes uintptr - SecurityDescriptor *securityDescriptor - SecurityQoS uintptr -} - -type unicodeString struct { - Length uint16 - MaximumLength uint16 - Buffer uintptr -} - -type securityDescriptor struct { - Revision byte - Sbz1 byte - Control uint16 - Owner uintptr - Group uintptr - Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl - Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl -} - -type ntStatus int32 - -func (status ntStatus) Err() error { - if status >= 0 { - return nil - } - return rtlNtStatusToDosError(status) -} - -var ( - // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. 
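As an aside, the following sketch shows the grow-and-retry pattern WString supports, mirroring the GetFinalPathNameByHandle loop earlier in this diff. It is illustrative and not part of the diff: GetCurrentDirectory is just a stand-in wide-string API, and the helper is written as if inside the internal stringbuffer package, which is not importable from outside go-winio. The named pipe listener errors resume below.

//go:build windows

package stringbuffer // illustrative: internal package, shown in-package

import "golang.org/x/sys/windows"

// currentDirectory borrows a pooled buffer, retries the call until the
// reported length fits, decodes the UTF-16 contents, and frees the buffer.
func currentDirectory() (string, error) {
	b := NewWString()
	defer b.Free()
	for {
		n, err := windows.GetCurrentDirectory(b.Cap(), b.Pointer())
		if err != nil {
			return "", err
		}
		if n > b.Cap() {
			// n is the required size (terminator included); grow and retry.
			b.ResizeTo(n)
			continue
		}
		return b.String(), nil
	}
}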
- ErrPipeListenerClosed = net.ErrClosed - - errPipeWriteClosed = errors.New("pipe has been closed for write") -) - -type win32Pipe struct { - *win32File - path string -} - -type win32MessageBytePipe struct { - win32Pipe - writeClosed bool - readEOF bool -} - -type pipeAddress string - -func (f *win32Pipe) LocalAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) RemoteAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) SetDeadline(t time.Time) error { - if err := f.SetReadDeadline(t); err != nil { - return err - } - return f.SetWriteDeadline(t) -} - -// CloseWrite closes the write side of a message pipe in byte mode. -func (f *win32MessageBytePipe) CloseWrite() error { - if f.writeClosed { - return errPipeWriteClosed - } - err := f.win32File.Flush() - if err != nil { - return err - } - _, err = f.win32File.Write(nil) - if err != nil { - return err - } - f.writeClosed = true - return nil -} - -// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since -// they are used to implement CloseWrite(). -func (f *win32MessageBytePipe) Write(b []byte) (int, error) { - if f.writeClosed { - return 0, errPipeWriteClosed - } - if len(b) == 0 { - return 0, nil - } - return f.win32File.Write(b) -} - -// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message -// mode pipe will return io.EOF, as will all subsequent reads. -func (f *win32MessageBytePipe) Read(b []byte) (int, error) { - if f.readEOF { - return 0, io.EOF - } - n, err := f.win32File.Read(b) - if err == io.EOF { //nolint:errorlint - // If this was the result of a zero-byte read, then - // it is possible that the read was due to a zero-size - // message. Since we are simulating CloseWrite with a - // zero-byte message, ensure that all future Read() calls - // also return EOF. - f.readEOF = true - } else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno - // ERROR_MORE_DATA indicates that the pipe's read mode is message mode - // and the message still has more bytes. Treat this as a success, since - // this package presents all named pipes as byte streams. - err = nil - } - return n, err -} - -func (pipeAddress) Network() string { - return "pipe" -} - -func (s pipeAddress) String() string { - return string(s) -} - -// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) { - for { - select { - case <-ctx.Done(): - return syscall.Handle(0), ctx.Err() - default: - wh, err := fs.CreateFile(*path, - access, - 0, // mode - nil, // security attributes - fs.OPEN_EXISTING, - fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS, - 0, // template file handle - ) - h := syscall.Handle(wh) - if err == nil { - return h, nil - } - if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno - return h, &os.PathError{Err: err, Op: "open", Path: *path} - } - // Wait 10 msec and try again. This is a rather simplistic - // view, as we always try each 10 milliseconds. - time.Sleep(10 * time.Millisecond) - } - } -} - -// DialPipe connects to a named pipe by path, timing out if the connection -// takes longer than the specified duration. If timeout is nil, then we use -// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) 
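DialPipe and DialPipeContext are the public entry points into the named pipe client code being removed here. A minimal client sketch follows; it is illustrative, not part of the diff, the pipe name is hypothetical, and a server must already be listening. The DialPipe implementation follows the sketch.

//go:build windows

package main

import (
	"log"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	timeout := 2 * time.Second
	conn, err := winio.DialPipe(`\\.\pipe\mypipe`, &timeout) // hypothetical pipe name
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if _, err := conn.Write([]byte("hello\n")); err != nil {
		log.Fatal(err)
	}
}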
-func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { - var absTimeout time.Time - if timeout != nil { - absTimeout = time.Now().Add(*timeout) - } else { - absTimeout = time.Now().Add(2 * time.Second) - } - ctx, cancel := context.WithDeadline(context.Background(), absTimeout) - defer cancel() - conn, err := DialPipeContext(ctx, path) - if errors.Is(err, context.DeadlineExceeded) { - return nil, ErrTimeout - } - return conn, err -} - -// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` -// cancellation or timeout. -func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { - return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) -} - -// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` -// cancellation or timeout. -func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { - var err error - var h syscall.Handle - h, err = tryDialPipe(ctx, &path, fs.AccessMask(access)) - if err != nil { - return nil, err - } - - var flags uint32 - err = getNamedPipeInfo(h, &flags, nil, nil, nil) - if err != nil { - return nil, err - } - - f, err := makeWin32File(h) - if err != nil { - syscall.Close(h) - return nil, err - } - - // If the pipe is in message mode, return a message byte pipe, which - // supports CloseWrite(). - if flags&windows.PIPE_TYPE_MESSAGE != 0 { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: f, path: path}, - }, nil - } - return &win32Pipe{win32File: f, path: path}, nil -} - -type acceptResponse struct { - f *win32File - err error -} - -type win32PipeListener struct { - firstHandle syscall.Handle - path string - config PipeConfig - acceptCh chan (chan acceptResponse) - closeCh chan int - doneCh chan int -} - -func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { - path16, err := syscall.UTF16FromString(path) - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - var oa objectAttributes - oa.Length = unsafe.Sizeof(oa) - - var ntPath unicodeString - if err := rtlDosPathNameToNtPathName(&path16[0], - &ntPath, - 0, - 0, - ).Err(); err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - defer localFree(ntPath.Buffer) - oa.ObjectName = &ntPath - oa.Attributes = windows.OBJ_CASE_INSENSITIVE - - // The security descriptor is only needed for the first pipe. - if first { - if sd != nil { - l := uint32(len(sd)) - sdb := localAlloc(0, l) - defer localFree(sdb) - copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) - oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) - } else { - // Construct the default named pipe security descriptor. 
-			var dacl uintptr
-			if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
-				return 0, fmt.Errorf("getting default named pipe ACL: %w", err)
-			}
-			defer localFree(dacl)
-
-			sdb := &securityDescriptor{
-				Revision: 1,
-				Control:  windows.SE_DACL_PRESENT,
-				Dacl:     dacl,
-			}
-			oa.SecurityDescriptor = sdb
-		}
-	}
-
-	typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS)
-	if c.MessageMode {
-		typ |= windows.FILE_PIPE_MESSAGE_TYPE
-	}
-
-	disposition := uint32(windows.FILE_OPEN)
-	access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
-	if first {
-		disposition = windows.FILE_CREATE
-		// By not asking for read or write access, the named pipe file system
-		// will put this pipe into an initially disconnected state, blocking
-		// client connections until the next call with first == false.
-		access = syscall.SYNCHRONIZE
-	}
-
-	timeout := int64(-50 * 10000) // 50ms
-
-	var (
-		h    syscall.Handle
-		iosb ioStatusBlock
-	)
-	err = ntCreateNamedPipeFile(&h,
-		access,
-		&oa,
-		&iosb,
-		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE,
-		disposition,
-		0,
-		typ,
-		0,
-		0,
-		0xffffffff,
-		uint32(c.InputBufferSize),
-		uint32(c.OutputBufferSize),
-		&timeout).Err()
-	if err != nil {
-		return 0, &os.PathError{Op: "open", Path: path, Err: err}
-	}
-
-	runtime.KeepAlive(ntPath)
-	return h, nil
-}
-
-func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
-	h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
-	if err != nil {
-		return nil, err
-	}
-	f, err := makeWin32File(h)
-	if err != nil {
-		syscall.Close(h)
-		return nil, err
-	}
-	return f, nil
-}
-
-func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
-	p, err := l.makeServerPipe()
-	if err != nil {
-		return nil, err
-	}
-
-	// Wait for the client to connect.
-	ch := make(chan error)
-	go func(p *win32File) {
-		ch <- connectPipe(p)
-	}(p)
-
-	select {
-	case err = <-ch:
-		if err != nil {
-			p.Close()
-			p = nil
-		}
-	case <-l.closeCh:
-		// Abort the connect request by closing the handle.
-		p.Close()
-		p = nil
-		err = <-ch
-		if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno
-			err = ErrPipeListenerClosed
-		}
-	}
-	return p, err
-}
-
-func (l *win32PipeListener) listenerRoutine() {
-	closed := false
-	for !closed {
-		select {
-		case <-l.closeCh:
-			closed = true
-		case responseCh := <-l.acceptCh:
-			var (
-				p   *win32File
-				err error
-			)
-			for {
-				p, err = l.makeConnectedServerPipe()
-				// If the connection was immediately closed by the client, try
-				// again.
-				if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno
-					break
-				}
-			}
-			responseCh <- acceptResponse{p, err}
-			closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno
-		}
-	}
-	syscall.Close(l.firstHandle)
-	l.firstHandle = 0
-	// Notify Close() and Accept() callers that the handle has been closed.
-	close(l.doneCh)
-}
-
-// PipeConfig contains configuration for the pipe listener.
-type PipeConfig struct {
-	// SecurityDescriptor contains a Windows security descriptor in SDDL format.
-	SecurityDescriptor string
-
-	// MessageMode determines whether the pipe is in byte or message mode. In either
-	// case the pipe is read in byte mode by default. The only practical difference in
-	// this implementation is that CloseWrite() is only supported for message mode pipes;
-	// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
-	// transferred to the reader (and returned as io.EOF in this implementation)
-	// when the pipe is in message mode.
- MessageMode bool - - // InputBufferSize specifies the size of the input buffer, in bytes. - InputBufferSize int32 - - // OutputBufferSize specifies the size of the output buffer, in bytes. - OutputBufferSize int32 -} - -// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. -// The pipe must not already exist. -func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { - var ( - sd []byte - err error - ) - if c == nil { - c = &PipeConfig{} - } - if c.SecurityDescriptor != "" { - sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) - if err != nil { - return nil, err - } - } - h, err := makeServerPipeHandle(path, sd, c, true) - if err != nil { - return nil, err - } - l := &win32PipeListener{ - firstHandle: h, - path: path, - config: *c, - acceptCh: make(chan (chan acceptResponse)), - closeCh: make(chan int), - doneCh: make(chan int), - } - go l.listenerRoutine() - return l, nil -} - -func connectPipe(p *win32File) error { - c, err := p.prepareIO() - if err != nil { - return err - } - defer p.wg.Done() - - err = connectNamedPipe(p.handle, &c.o) - _, err = p.asyncIO(c, nil, 0, err) - if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno - return err - } - return nil -} - -func (l *win32PipeListener) Accept() (net.Conn, error) { - ch := make(chan acceptResponse) - select { - case l.acceptCh <- ch: - response := <-ch - err := response.err - if err != nil { - return nil, err - } - if l.config.MessageMode { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: response.f, path: l.path}, - }, nil - } - return &win32Pipe{win32File: response.f, path: l.path}, nil - case <-l.doneCh: - return nil, ErrPipeListenerClosed - } -} - -func (l *win32PipeListener) Close() error { - select { - case l.closeCh <- 1: - <-l.doneCh - case <-l.doneCh: - } - return nil -} - -func (l *win32PipeListener) Addr() net.Addr { - return pipeAddress(l.path) -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go deleted file mode 100644 index 48ce4e9243..0000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ /dev/null @@ -1,232 +0,0 @@ -// Package guid provides a GUID type. The backing structure for a GUID is -// identical to that used by the golang.org/x/sys/windows GUID type. -// There are two main binary encodings used for a GUID, the big-endian encoding, -// and the Windows (mixed-endian) encoding. See here for details: -// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding -package guid - -import ( - "crypto/rand" - "crypto/sha1" //nolint:gosec // not used for secure application - "encoding" - "encoding/binary" - "fmt" - "strconv" -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment - -// Variant specifies which GUID variant (or "type") of the GUID. It determines -// how the entirety of the rest of the GUID is interpreted. -type Variant uint8 - -// The variants specified by RFC 4122 section 4.1.1. -const ( - // VariantUnknown specifies a GUID variant which does not conform to one of - // the variant encodings specified in RFC 4122. - VariantUnknown Variant = iota - VariantNCS - VariantRFC4122 // RFC 4122 - VariantMicrosoft - VariantFuture -) - -// Version specifies how the bits in the GUID were generated. For instance, a -// version 4 GUID is randomly generated, and a version 5 is generated from the -// hash of an input string. 
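The listener half pairs with the client sketch above. The following minimal server uses the deleted ListenPipe and PipeConfig APIs; it is illustrative, not part of the diff, the pipe name is hypothetical, and MessageMode enables CloseWrite on accepted connections, as the field's doc comment explains. The guid package deletion resumes below.

//go:build windows

package main

import (
	"io"
	"log"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	l, err := winio.ListenPipe(`\\.\pipe\mypipe`, &winio.PipeConfig{MessageMode: true})
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func() {
			defer conn.Close()
			_, _ = io.Copy(io.Discard, conn) // drain the client; a real server would respond
		}()
	}
}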
-type Version uint8 - -func (v Version) String() string { - return strconv.FormatUint(uint64(v), 10) -} - -var _ = (encoding.TextMarshaler)(GUID{}) -var _ = (encoding.TextUnmarshaler)(&GUID{}) - -// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. -func NewV4() (GUID, error) { - var b [16]byte - if _, err := rand.Read(b[:]); err != nil { - return GUID{}, err - } - - g := FromArray(b) - g.setVersion(4) // Version 4 means randomly generated. - g.setVariant(VariantRFC4122) - - return g, nil -} - -// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) -// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, -// and the sample code treats it as a series of bytes, so we do the same here. -// -// Some implementations, such as those found on Windows, treat the name as a -// big-endian UTF16 stream of bytes. If that is desired, the string can be -// encoded as such before being passed to this function. -func NewV5(namespace GUID, name []byte) (GUID, error) { - b := sha1.New() //nolint:gosec // not used for secure application - namespaceBytes := namespace.ToArray() - b.Write(namespaceBytes[:]) - b.Write(name) - - a := [16]byte{} - copy(a[:], b.Sum(nil)) - - g := FromArray(a) - g.setVersion(5) // Version 5 means generated from a string. - g.setVariant(VariantRFC4122) - - return g, nil -} - -func fromArray(b [16]byte, order binary.ByteOrder) GUID { - var g GUID - g.Data1 = order.Uint32(b[0:4]) - g.Data2 = order.Uint16(b[4:6]) - g.Data3 = order.Uint16(b[6:8]) - copy(g.Data4[:], b[8:16]) - return g -} - -func (g GUID) toArray(order binary.ByteOrder) [16]byte { - b := [16]byte{} - order.PutUint32(b[0:4], g.Data1) - order.PutUint16(b[4:6], g.Data2) - order.PutUint16(b[6:8], g.Data3) - copy(b[8:16], g.Data4[:]) - return b -} - -// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. -func FromArray(b [16]byte) GUID { - return fromArray(b, binary.BigEndian) -} - -// ToArray returns an array of 16 bytes representing the GUID in big-endian -// encoding. -func (g GUID) ToArray() [16]byte { - return g.toArray(binary.BigEndian) -} - -// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. -func FromWindowsArray(b [16]byte) GUID { - return fromArray(b, binary.LittleEndian) -} - -// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows -// encoding. -func (g GUID) ToWindowsArray() [16]byte { - return g.toArray(binary.LittleEndian) -} - -func (g GUID) String() string { - return fmt.Sprintf( - "%08x-%04x-%04x-%04x-%012x", - g.Data1, - g.Data2, - g.Data3, - g.Data4[:2], - g.Data4[2:]) -} - -// FromString parses a string containing a GUID and returns the GUID. The only -// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -// format. 
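Unlike the internal packages above, pkg/guid is public and cross-platform, so its removal is visible to downstream importers. A short, illustrative usage sketch of the generation, stringification, and parsing paths (not part of the diff); FromString's implementation follows.

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	g, err := guid.NewV4()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(g.String(), g.Version(), g.Variant()) // e.g. "1b4e28ba-...", 4, RFC 4122

	// Round-trip through the textual form FromString accepts.
	parsed, err := guid.FromString(g.String())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed == g) // true: GUID is a comparable value type
}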
-func FromString(s string) (GUID, error) {
-	if len(s) != 36 {
-		return GUID{}, fmt.Errorf("invalid GUID %q", s)
-	}
-	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
-		return GUID{}, fmt.Errorf("invalid GUID %q", s)
-	}
-
-	var g GUID
-
-	data1, err := strconv.ParseUint(s[0:8], 16, 32)
-	if err != nil {
-		return GUID{}, fmt.Errorf("invalid GUID %q", s)
-	}
-	g.Data1 = uint32(data1)
-
-	data2, err := strconv.ParseUint(s[9:13], 16, 16)
-	if err != nil {
-		return GUID{}, fmt.Errorf("invalid GUID %q", s)
-	}
-	g.Data2 = uint16(data2)
-
-	data3, err := strconv.ParseUint(s[14:18], 16, 16)
-	if err != nil {
-		return GUID{}, fmt.Errorf("invalid GUID %q", s)
-	}
-	g.Data3 = uint16(data3)
-
-	for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
-		v, err := strconv.ParseUint(s[x:x+2], 16, 8)
-		if err != nil {
-			return GUID{}, fmt.Errorf("invalid GUID %q", s)
-		}
-		g.Data4[i] = uint8(v)
-	}
-
-	return g, nil
-}
-
-func (g *GUID) setVariant(v Variant) {
-	d := g.Data4[0]
-	switch v {
-	case VariantNCS:
-		d = (d & 0x7f)
-	case VariantRFC4122:
-		d = (d & 0x3f) | 0x80
-	case VariantMicrosoft:
-		d = (d & 0x1f) | 0xc0
-	case VariantFuture:
-		d = (d & 0x0f) | 0xe0
-	case VariantUnknown:
-		fallthrough
-	default:
-		panic(fmt.Sprintf("invalid variant: %d", v))
-	}
-	g.Data4[0] = d
-}
-
-// Variant returns the GUID variant, as defined in RFC 4122.
-func (g GUID) Variant() Variant {
-	b := g.Data4[0]
-	if b&0x80 == 0 {
-		return VariantNCS
-	} else if b&0xc0 == 0x80 {
-		return VariantRFC4122
-	} else if b&0xe0 == 0xc0 {
-		return VariantMicrosoft
-	} else if b&0xe0 == 0xe0 {
-		return VariantFuture
-	}
-	return VariantUnknown
-}
-
-func (g *GUID) setVersion(v Version) {
-	g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
-}
-
-// Version returns the GUID version, as defined in RFC 4122.
-func (g GUID) Version() Version {
-	return Version((g.Data3 & 0xF000) >> 12)
-}
-
-// MarshalText returns the textual representation of the GUID.
-func (g GUID) MarshalText() ([]byte, error) {
-	return []byte(g.String()), nil
-}
-
-// UnmarshalText takes the textual representation of a GUID, and unmarshals it
-// into this GUID.
-func (g *GUID) UnmarshalText(text []byte) error {
-	g2, err := FromString(string(text))
-	if err != nil {
-		return err
-	}
-	*g = g2
-	return nil
-}
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
deleted file mode 100644
index 805bd35484..0000000000
--- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package guid
-
-// GUID represents a GUID/UUID. It has the same structure as
-// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
-// that type. It is defined as its own type as that is only available to builds
-// targeted at `windows`. The representation matches that used by native Windows
-// code.
-type GUID struct {
-	Data1 uint32
-	Data2 uint16
-	Data3 uint16
-	Data4 [8]byte
-}
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
deleted file mode 100644
index 27e45ee5cc..0000000000
--- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build windows
-// +build windows
-
-package guid
-
-import "golang.org/x/sys/windows"
-
-// GUID represents a GUID/UUID.
It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type so that stringification and -// marshaling can be supported. The representation matches that used by native -// Windows code. -type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go deleted file mode 100644 index 4076d3132f..0000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT. - -package guid - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[VariantUnknown-0] - _ = x[VariantNCS-1] - _ = x[VariantRFC4122-2] - _ = x[VariantMicrosoft-3] - _ = x[VariantFuture-4] -} - -const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture" - -var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33} - -func (i Variant) String() string { - if i >= Variant(len(_Variant_index)-1) { - return "Variant(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Variant_name[_Variant_index[i]:_Variant_index[i+1]] -} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go deleted file mode 100644 index 0ff9dac906..0000000000 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ /dev/null @@ -1,197 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "runtime" - "sync" - "syscall" - "unicode/utf16" - - "golang.org/x/sys/windows" -) - -//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges -//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf -//sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread -//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW -//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW -//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW - -const ( - //revive:disable-next-line:var-naming ALL_CAPS - SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED - - //revive:disable-next-line:var-naming ALL_CAPS - ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED - - SeBackupPrivilege = "SeBackupPrivilege" - SeRestorePrivilege = "SeRestorePrivilege" - SeSecurityPrivilege = "SeSecurityPrivilege" -) - -var ( - privNames = make(map[string]uint64) - privNameMutex sync.Mutex -) - -// PrivilegeError represents an error enabling privileges. 
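RunWithPrivilege is the usual entry point into the privilege code deleted here. A minimal, illustrative sketch follows (not part of the diff): the callback runs on a thread whose token has SeBackupPrivilege enabled, assuming the process token already holds that privilege, e.g. when running elevated. The PrivilegeError type continues below.

//go:build windows

package main

import (
	"log"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
		// Privileged work goes here, e.g. opening files with backup semantics.
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}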
-type PrivilegeError struct {
-	privileges []uint64
-}
-
-func (e *PrivilegeError) Error() string {
-	s := "Could not enable privilege "
-	if len(e.privileges) > 1 {
-		s = "Could not enable privileges "
-	}
-	for i, p := range e.privileges {
-		if i != 0 {
-			s += ", "
-		}
-		s += `"`
-		s += getPrivilegeName(p)
-		s += `"`
-	}
-	return s
-}
-
-// RunWithPrivilege enables a single privilege for a function call.
-func RunWithPrivilege(name string, fn func() error) error {
-	return RunWithPrivileges([]string{name}, fn)
-}
-
-// RunWithPrivileges enables privileges for a function call.
-func RunWithPrivileges(names []string, fn func() error) error {
-	privileges, err := mapPrivileges(names)
-	if err != nil {
-		return err
-	}
-	runtime.LockOSThread()
-	defer runtime.UnlockOSThread()
-	token, err := newThreadToken()
-	if err != nil {
-		return err
-	}
-	defer releaseThreadToken(token)
-	err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
-	if err != nil {
-		return err
-	}
-	return fn()
-}
-
-func mapPrivileges(names []string) ([]uint64, error) {
-	privileges := make([]uint64, 0, len(names))
-	privNameMutex.Lock()
-	defer privNameMutex.Unlock()
-	for _, name := range names {
-		p, ok := privNames[name]
-		if !ok {
-			err := lookupPrivilegeValue("", name, &p)
-			if err != nil {
-				return nil, err
-			}
-			privNames[name] = p
-		}
-		privileges = append(privileges, p)
-	}
-	return privileges, nil
-}
-
-// EnableProcessPrivileges enables privileges globally for the process.
-func EnableProcessPrivileges(names []string) error {
-	return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
-}
-
-// DisableProcessPrivileges disables privileges globally for the process.
-func DisableProcessPrivileges(names []string) error {
-	return enableDisableProcessPrivilege(names, 0)
-}
-
-func enableDisableProcessPrivilege(names []string, action uint32) error {
-	privileges, err := mapPrivileges(names)
-	if err != nil {
-		return err
-	}
-
-	p := windows.CurrentProcess()
-	var token windows.Token
-	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
-	if err != nil {
-		return err
-	}
-
-	defer token.Close()
-	return adjustPrivileges(token, privileges, action)
-}
-
-func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
-	var b bytes.Buffer
-	_ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
-	for _, p := range privileges {
-		_ = binary.Write(&b, binary.LittleEndian, p)
-		_ = binary.Write(&b, binary.LittleEndian, action)
-	}
-	prevState := make([]byte, b.Len())
-	reqSize := uint32(0)
-	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
-	if !success {
-		return err
-	}
-	if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno
-		return &PrivilegeError{privileges}
-	}
-	return nil
-}
-
-func getPrivilegeName(luid uint64) string {
-	var nameBuffer [256]uint16
-	bufSize := uint32(len(nameBuffer))
-	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
-	if err != nil {
-		return fmt.Sprintf("<unknown privilege %d>", luid)
-	}
-
-	var displayNameBuffer [256]uint16
-	displayBufSize := uint32(len(displayNameBuffer))
-	var langID uint32
-	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
-	if err != nil {
-		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
-	}
-
-	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
-}
-
-func newThreadToken() (windows.Token, error) {
-	err :=
impersonateSelf(windows.SecurityImpersonation) - if err != nil { - return 0, err - } - - var token windows.Token - err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) - if err != nil { - rerr := revertToSelf() - if rerr != nil { - panic(rerr) - } - return 0, err - } - return token, nil -} - -func releaseThreadToken(h windows.Token) { - err := revertToSelf() - if err != nil { - panic(err) - } - h.Close() -} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go deleted file mode 100644 index 67d1a104a6..0000000000 --- a/vendor/github.com/Microsoft/go-winio/reparse.go +++ /dev/null @@ -1,131 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "strings" - "unicode/utf16" - "unsafe" -) - -const ( - reparseTagMountPoint = 0xA0000003 - reparseTagSymlink = 0xA000000C -) - -type reparseDataBuffer struct { - ReparseTag uint32 - ReparseDataLength uint16 - Reserved uint16 - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 -} - -// ReparsePoint describes a Win32 symlink or mount point. -type ReparsePoint struct { - Target string - IsMountPoint bool -} - -// UnsupportedReparsePointError is returned when trying to decode a non-symlink or -// mount point reparse point. -type UnsupportedReparsePointError struct { - Tag uint32 -} - -func (e *UnsupportedReparsePointError) Error() string { - return fmt.Sprintf("unsupported reparse point %x", e.Tag) -} - -// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink -// or a mount point. -func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { - tag := binary.LittleEndian.Uint32(b[0:4]) - return DecodeReparsePointData(tag, b[8:]) -} - -func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { - isMountPoint := false - switch tag { - case reparseTagMountPoint: - isMountPoint = true - case reparseTagSymlink: - default: - return nil, &UnsupportedReparsePointError{tag} - } - nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) - if !isMountPoint { - nameOffset += 4 - } - nameLength := binary.LittleEndian.Uint16(b[6:8]) - name := make([]uint16, nameLength/2) - err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) - if err != nil { - return nil, err - } - return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil -} - -func isDriveLetter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or -// mount point. -func EncodeReparsePoint(rp *ReparsePoint) []byte { - // Generate an NT path and determine if this is a relative path. - var ntTarget string - relative := false - if strings.HasPrefix(rp.Target, `\\?\`) { - ntTarget = `\??\` + rp.Target[4:] - } else if strings.HasPrefix(rp.Target, `\\`) { - ntTarget = `\??\UNC\` + rp.Target[2:] - } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { - ntTarget = `\??\` + rp.Target - } else { - ntTarget = rp.Target - relative = true - } - - // The paths must be NUL-terminated even though they are counted strings. 
- target16 := utf16.Encode([]rune(rp.Target + "\x00")) - ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) - - size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 - size += len(ntTarget16)*2 + len(target16)*2 - - tag := uint32(reparseTagMountPoint) - if !rp.IsMountPoint { - tag = reparseTagSymlink - size += 4 // Add room for symlink flags - } - - data := reparseDataBuffer{ - ReparseTag: tag, - ReparseDataLength: uint16(size), - SubstituteNameOffset: 0, - SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), - PrintNameOffset: uint16(len(ntTarget16) * 2), - PrintNameLength: uint16((len(target16) - 1) * 2), - } - - var b bytes.Buffer - _ = binary.Write(&b, binary.LittleEndian, &data) - if !rp.IsMountPoint { - flags := uint32(0) - if relative { - flags |= 1 - } - _ = binary.Write(&b, binary.LittleEndian, flags) - } - - _ = binary.Write(&b, binary.LittleEndian, ntTarget16) - _ = binary.Write(&b, binary.LittleEndian, target16) - return b.Bytes() -} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go deleted file mode 100644 index 5550ef6b61..0000000000 --- a/vendor/github.com/Microsoft/go-winio/sd.go +++ /dev/null @@ -1,144 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "errors" - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW -//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW -//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW -//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW -//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW -//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW -//sys localFree(mem uintptr) = LocalFree -//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength - -type AccountLookupError struct { - Name string - Err error -} - -func (e *AccountLookupError) Error() string { - if e.Name == "" { - return "lookup account: empty account name specified" - } - var s string - switch { - case errors.Is(e.Err, windows.ERROR_INVALID_SID): - s = "the security ID structure is invalid" - case errors.Is(e.Err, windows.ERROR_NONE_MAPPED): - s = "not found" - default: - s = e.Err.Error() - } - return "lookup account " + e.Name + ": " + s -} - -func (e *AccountLookupError) Unwrap() error { return e.Err } - -type SddlConversionError struct { - Sddl string - Err error -} - -func (e *SddlConversionError) Error() string { - return "convert " + e.Sddl + ": " + e.Err.Error() -} - -func (e *SddlConversionError) Unwrap() error { return e.Err } - -// LookupSidByName looks up the SID of an account by name -// -//revive:disable-next-line:var-naming SID, not Sid -func LookupSidByName(name string) (sid string, err error) { - if name == "" { - return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED} - } - - var sidSize, 
sidNameUse, refDomainSize uint32 - err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno - return "", &AccountLookupError{name, err} - } - sidBuffer := make([]byte, sidSize) - refDomainBuffer := make([]uint16, refDomainSize) - err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) - if err != nil { - return "", &AccountLookupError{name, err} - } - var strBuffer *uint16 - err = convertSidToStringSid(&sidBuffer[0], &strBuffer) - if err != nil { - return "", &AccountLookupError{name, err} - } - sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) - localFree(uintptr(unsafe.Pointer(strBuffer))) - return sid, nil -} - -// LookupNameBySid looks up the name of an account by SID -// -//revive:disable-next-line:var-naming SID, not Sid -func LookupNameBySid(sid string) (name string, err error) { - if sid == "" { - return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED} - } - - sidBuffer, err := windows.UTF16PtrFromString(sid) - if err != nil { - return "", &AccountLookupError{sid, err} - } - - var sidPtr *byte - if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { - return "", &AccountLookupError{sid, err} - } - defer localFree(uintptr(unsafe.Pointer(sidPtr))) - - var nameSize, refDomainSize, sidNameUse uint32 - err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno - return "", &AccountLookupError{sid, err} - } - - nameBuffer := make([]uint16, nameSize) - refDomainBuffer := make([]uint16, refDomainSize) - err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) - if err != nil { - return "", &AccountLookupError{sid, err} - } - - name = windows.UTF16ToString(nameBuffer) - return name, nil -} - -func SddlToSecurityDescriptor(sddl string) ([]byte, error) { - var sdBuffer uintptr - err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) - if err != nil { - return nil, &SddlConversionError{sddl, err} - } - defer localFree(sdBuffer) - sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) - copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) - return sd, nil -} - -func SecurityDescriptorToSddl(sd []byte) (string, error) { - var sddl *uint16 - // The returned string length seems to include an arbitrary number of terminating NULs. - // Don't use it. 
- err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) - if err != nil { - return "", err - } - defer localFree(uintptr(unsafe.Pointer(sddl))) - return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go deleted file mode 100644 index a6ca111b39..0000000000 --- a/vendor/github.com/Microsoft/go-winio/syscall.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build windows - -package winio - -//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/vendor/github.com/Microsoft/go-winio/tools.go b/vendor/github.com/Microsoft/go-winio/tools.go deleted file mode 100644 index 2aa045843e..0000000000 --- a/vendor/github.com/Microsoft/go-winio/tools.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build tools - -package winio - -import _ "golang.org/x/tools/cmd/stringer" diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go deleted file mode 100644 index 469b16f639..0000000000 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ /dev/null @@ -1,419 +0,0 @@ -//go:build windows - -// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. - -package winio - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procBackupRead = modkernel32.NewProc("BackupRead") - procBackupWrite = modkernel32.NewProc("BackupWrite") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") -) - -func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - err = errnoErr(e1) - } - return -} - -func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) - if r1 == 0 { - err = 
errnoErr(e1) - } - return -} - -func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return - } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) -} - -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSidToSid(str *uint16, sid **byte) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getSecurityDescriptorLength(sd uintptr) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) - len = uint32(r0) - return -} - -func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(accountName) - if err != nil { - return - } - return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) -} - -func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) -} - -func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeName(_p0, luid, buffer, size) -} - -func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _lookupPrivilegeValue(_p0, _p1, luid) -} - -func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func revertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := 
syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) - newport = syscall.Handle(r0) - if newport == 0 { - err = errnoErr(e1) - } - return -} - -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) -} - -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = errnoErr(e1) - } - return -} - -func getCurrentThread() (h syscall.Handle) { - r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - h = syscall.Handle(r0) - return -} - -func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) - ptr = uintptr(r0) - return -} - -func localFree(mem uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) - return -} - -func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - 
-func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) - status = ntStatus(r0) - return -} - -func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) - status = ntStatus(r0) - return -} - -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) - status = ntStatus(r0) - return -} - -func rtlNtStatusToDosError(status ntStatus) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) - if r0 != 0 { - winerr = syscall.Errno(r0) - } - return -} - -func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/github.com/alessio/shellescape/.gitignore b/vendor/github.com/alessio/shellescape/.gitignore deleted file mode 100644 index 4ba7c2d137..0000000000 --- a/vendor/github.com/alessio/shellescape/.gitignore +++ /dev/null @@ -1,28 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -.idea/ - -escargs diff --git a/vendor/github.com/alessio/shellescape/.golangci.yml b/vendor/github.com/alessio/shellescape/.golangci.yml deleted file mode 100644 index cd4a17e442..0000000000 --- a/vendor/github.com/alessio/shellescape/.golangci.yml +++ /dev/null @@ -1,64 +0,0 @@ -# run: -# # timeout for analysis, e.g. 
30s, 5m, default is 1m -# timeout: 5m - -linters: - disable-all: true - enable: - - bodyclose - - deadcode - - depguard - - dogsled - - goconst - - gocritic - - gofmt - - goimports - - golint - - gosec - - gosimple - - govet - - ineffassign - - interfacer - - maligned - - misspell - - prealloc - - scopelint - - staticcheck - - structcheck - - stylecheck - - typecheck - - unconvert - - unparam - - unused - - misspell - - wsl - -issues: - exclude-rules: - - text: "Use of weak random number generator" - linters: - - gosec - - text: "comment on exported var" - linters: - - golint - - text: "don't use an underscore in package name" - linters: - - golint - - text: "ST1003:" - linters: - - stylecheck - # FIXME: Disabled until golangci-lint updates stylecheck with this fix: - # https://github.com/dominikh/go-tools/issues/389 - - text: "ST1016:" - linters: - - stylecheck - -linters-settings: - dogsled: - max-blank-identifiers: 3 - maligned: - # print struct with more effective memory layout or not, false by default - suggest-new: true - -run: - tests: false diff --git a/vendor/github.com/alessio/shellescape/.goreleaser.yml b/vendor/github.com/alessio/shellescape/.goreleaser.yml deleted file mode 100644 index 064c9374d7..0000000000 --- a/vendor/github.com/alessio/shellescape/.goreleaser.yml +++ /dev/null @@ -1,33 +0,0 @@ -# This is an example goreleaser.yaml file with some sane defaults. -# Make sure to check the documentation at http://goreleaser.com -before: - hooks: - # You may remove this if you don't use go modules. - - go mod download - # you may remove this if you don't need go generate - - go generate ./... -builds: - - env: - - CGO_ENABLED=0 - main: ./cmd/escargs - goos: - - linux - - windows - - darwin -archives: - - replacements: - darwin: Darwin - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 -checksum: - name_template: 'checksums.txt' -snapshot: - name_template: "{{ .Tag }}-next" -changelog: - sort: asc - filters: - exclude: - - '^docs:' - - '^test:' diff --git a/vendor/github.com/alessio/shellescape/AUTHORS b/vendor/github.com/alessio/shellescape/AUTHORS deleted file mode 100644 index 4a647a6f41..0000000000 --- a/vendor/github.com/alessio/shellescape/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Alessio Treglia <alessio@debian.org> diff --git a/vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md b/vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md deleted file mode 100644 index e8eda60620..0000000000 --- a/vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,76 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation.
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at alessio@debian.org. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/vendor/github.com/alessio/shellescape/LICENSE b/vendor/github.com/alessio/shellescape/LICENSE deleted file mode 100644 index 9f760679f4..0000000000 --- a/vendor/github.com/alessio/shellescape/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Alessio Treglia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/alessio/shellescape/README.md b/vendor/github.com/alessio/shellescape/README.md deleted file mode 100644 index 910bb253b9..0000000000 --- a/vendor/github.com/alessio/shellescape/README.md +++ /dev/null @@ -1,61 +0,0 @@ -![Build](https://github.com/alessio/shellescape/workflows/Build/badge.svg) -[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/alessio/shellescape?tab=overview) -[![sourcegraph](https://sourcegraph.com/github.com/alessio/shellescape/-/badge.svg)](https://sourcegraph.com/github.com/alessio/shellescape) -[![codecov](https://codecov.io/gh/alessio/shellescape/branch/master/graph/badge.svg)](https://codecov.io/gh/alessio/shellescape) -[![Coverage](https://gocover.io/_badge/github.com/alessio/shellescape)](https://gocover.io/github.com/alessio/shellescape) -[![Go Report Card](https://goreportcard.com/badge/github.com/alessio/shellescape)](https://goreportcard.com/report/github.com/alessio/shellescape) - -# shellescape -Escape arbitrary strings for safe use as command line arguments. -## Contents of the package - -This package provides the `shellescape.Quote()` function that returns a -shell-escaped copy of a string. This functionality could be helpful -in those cases where it is known that the output of a Go program will -be appended to/used in the context of shell programs' command line arguments. - -This work was inspired by the Python original package -[shellescape](https://pypi.python.org/pypi/shellescape). 
- -## Usage - -The following snippet shows a typical unsafe idiom: - -```go -package main - -import ( - "fmt" - "os" -) - -func main() { - fmt.Printf("ls -l %s\n", os.Args[1]) -} -``` -_[See in Go Playground](https://play.golang.org/p/Wj2WoUfH_d)_ - -Especially when creating a pipeline of commands which might end up being -executed by a shell interpreter, it is particularly unsafe to not -escape arguments. - -`shellescape.Quote()` comes in handy to safely escape strings: - -```go -package main - -import ( - "fmt" - "os" - - "gopkg.in/alessio/shellescape.v1" -) - -func main() { - fmt.Printf("ls -l %s\n", shellescape.Quote(os.Args[1])) -} -``` -_[See in Go Playground](https://play.golang.org/p/HJ_CXgSrmp)_ - -## The escargs utility -__escargs__ reads lines from the standard input and prints shell-escaped versions. Unlike __xargs__, blank lines on the standard input are not discarded. diff --git a/vendor/github.com/alessio/shellescape/shellescape.go b/vendor/github.com/alessio/shellescape/shellescape.go deleted file mode 100644 index dc34a556ae..0000000000 --- a/vendor/github.com/alessio/shellescape/shellescape.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Package shellescape provides shellescape.Quote to escape arbitrary -strings for safe use as command line arguments in the most common -POSIX shells. - -The original Python package which this work was inspired by can be found -at https://pypi.python.org/pypi/shellescape. -*/ -package shellescape // "import gopkg.in/alessio/shellescape.v1" - -/* -The functionality provided by shellescape.Quote could be helpful -in those cases where it is known that the output of a Go program will -be appended to/used in the context of shell programs' command line arguments. -*/ - -import ( - "regexp" - "strings" - "unicode" -) - -var pattern *regexp.Regexp - -func init() { - pattern = regexp.MustCompile(`[^\w@%+=:,./-]`) -} - -// Quote returns a shell-escaped version of the string s. The returned value -// is a string that can safely be used as one token in a shell command line. -func Quote(s string) string { - if len(s) == 0 { - return "''" - } - - if pattern.MatchString(s) { - return "'" + strings.ReplaceAll(s, "'", "'\"'\"'") + "'" - } - - return s -} - -// QuoteCommand returns a shell-escaped version of the slice of strings. -// The returned value is a string that can safely be used as shell command arguments. -func QuoteCommand(args []string) string { - l := make([]string, len(args)) - - for i, s := range args { - l[i] = Quote(s) - } - - return strings.Join(l, " ") -} - -// StripUnsafe removes non-printable runes, e.g. control characters in -// a string that is meant for consumption by terminals that support -// control characters.
-func StripUnsafe(s string) string { - return strings.Map(func(r rune) rune { - if unicode.IsPrint(r) { - return r - } - - return -1 - }, s) -} diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore deleted file mode 100644 index 8d69a9418a..0000000000 --- a/vendor/github.com/asaskevich/govalidator/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -bin/ -.idea/ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml deleted file mode 100644 index bb83c6670d..0000000000 --- a/vendor/github.com/asaskevich/govalidator/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -dist: xenial -go: - - '1.10' - - '1.11' - - '1.12' - - '1.13' - - 'tip' - -script: - - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md deleted file mode 100644 index 4b462b0d81..0000000000 --- a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,43 +0,0 @@ -# Contributor Code of Conduct - -This project adheres to [The Code Manifesto](http://codemanifesto.com) -as its guidelines for contributor interactions. - -## The Code Manifesto - -We want to work in an ecosystem that empowers developers to reach their -potential — one that encourages growth and effective collaboration. A space -that is safe for all. - -A space such as this benefits everyone that participates in it. It encourages -new developers to enter our field. It is through discussion and collaboration -that we grow, and through growth that we improve. - -In the effort to create such a place, we hold to these values: - -1. **Discrimination limits us.** This includes discrimination on the basis of - race, gender, sexual orientation, gender identity, age, nationality, - technology and any other arbitrary exclusion of a group of people. -2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort - levels. Remember that, and if brought to your attention, heed it. -3. **We are our biggest assets.** None of us were born masters of our trade. - Each of us has been helped along the way. Return that favor, when and where - you can. -4. **We are resources for the future.** As an extension of #3, share what you - know. Make yourself a resource to help those that come after you. -5. **Respect defines us.** Treat others as you wish to be treated. Make your - discussions, criticisms and debates from a position of respectfulness. Ask - yourself, is it true? Is it necessary? Is it constructive? Anything less is - unacceptable. -6. **Reactions require grace.** Angry responses are valid, but abusive language - and vindictive actions are toxic. When something happens that offends you, - handle it assertively, but be respectful. Escalate reasonably, and try to - allow the offender an opportunity to explain themselves, and possibly - correct the issue. -7. **Opinions are just that: opinions.** Each and every one of us, due to our - background and upbringing, have varying opinions. That is perfectly - acceptable. 
Remember this: if you respect your own opinions, you should - respect the opinions of others. -8. **To err is human.** You might not intend it, but mistakes do happen and - contribute to build experience. Tolerate honest mistakes, and don't - hesitate to apologize if you make one yourself. diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md deleted file mode 100644 index 7ed268a1ed..0000000000 --- a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md +++ /dev/null @@ -1,63 +0,0 @@ -#### Support -If you do have a contribution to the package, feel free to create a Pull Request or an Issue. - -#### What to contribute -If you don't know what to do, there are some features and functions that need to be done - -- [ ] Refactor code -- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check -- [ ] Create an actual list of contributors and projects that are currently using this package -- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) -- [ ] Update the actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) -- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones -- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc -- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) -- [ ] Implement fuzzing testing -- [ ] Implement some struct/map/array utilities -- [ ] Implement map/array validation -- [ ] Implement benchmarking -- [ ] Implement batch of examples -- [ ] Look at forks for new features and fixes - -#### Advice -Feel free to create what you want, but keep in mind when you implement new features: -- Code must be clear and readable; names of variables/constants must clearly describe what they are doing -- Public functions must be documented and described in the source file and added to the list of available functions in README.md -- There must be unit-tests for any new functions and improvements - -## Financial contributions - -We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator). -Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed. - - -## Credits - - -### Contributors - -Thank you to all the people who have already contributed to govalidator! - - - -### Backers - -Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)] - - - - -### Sponsors - -Thank you to all our sponsors!
(please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) - - - - - - - - - - - \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE deleted file mode 100644 index cacba91024..0000000000 --- a/vendor/github.com/asaskevich/govalidator/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014-2020 Alex Saskevich - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md deleted file mode 100644 index 2c3fc35eb6..0000000000 --- a/vendor/github.com/asaskevich/govalidator/README.md +++ /dev/null @@ -1,622 +0,0 @@ -govalidator -=========== -[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) -[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) -[![Coverage](https://codecov.io/gh/asaskevich/govalidator/branch/master/graph/badge.svg)](https://codecov.io/gh/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) - -A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). - -#### Installation -Make sure that Go is installed on your computer. 
-Type the following command in your terminal: - - go get github.com/asaskevich/govalidator - -or you can get a specific release of the package with `gopkg.in`: - - go get gopkg.in/asaskevich/govalidator.v10 - -After that, the package is ready to use. - - -#### Import package in your project -Add the following line to your `*.go` file: -```go -import "github.com/asaskevich/govalidator" -``` -If you would rather not use the long `govalidator` name, you can alias it like this: -```go -import ( - valid "github.com/asaskevich/govalidator" -) -``` - -#### Activate behavior to require all fields have a validation tag by default -`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function. - -`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to distinguish between `nil` and `zero value` states can use this. If disabled, both `nil` and `zero` values cause validation errors. - -```go -import "github.com/asaskevich/govalidator" - -func init() { - govalidator.SetFieldsRequiredByDefault(true) -} -``` - -Here's some code to explain it: -```go -// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): -type exampleStruct struct { - Name string `` - Email string `valid:"email"` -} - -// this, however, will only fail when Email is empty or an invalid email address: -type exampleStruct2 struct { - Name string `valid:"-"` - Email string `valid:"email"` -} - -// lastly, this will only fail when Email is an invalid email address but not when it's empty: -type exampleStruct3 struct { - Name string `valid:"-"` - Email string `valid:"email,optional"` -} -``` - -#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123)) -##### Custom validator function signature -A context was added as the second parameter; for structs this is the object being validated – this makes dependent validation possible. -```go -import "github.com/asaskevich/govalidator" - -// old signature -func(i interface{}) bool - -// new signature -func(i interface{}, o interface{}) bool -``` - -##### Adding a custom validator -This was changed to prevent data races when accessing custom validators. -```go -import "github.com/asaskevich/govalidator" - -// before -govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool { - // ... -} - -// after -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool { - // ...
-}) -``` - -#### List of functions: -```go -func Abs(value float64) float64 -func BlackList(str, chars string) string -func ByteLength(str string, params ...string) bool -func CamelCaseToUnderscore(str string) string -func Contains(str, substring string) bool -func Count(array []interface{}, iterator ConditionIterator) int -func Each(array []interface{}, iterator Iterator) -func ErrorByField(e error, field string) string -func ErrorsByField(e error) map[string]string -func Filter(array []interface{}, iterator ConditionIterator) []interface{} -func Find(array []interface{}, iterator ConditionIterator) interface{} -func GetLine(s string, index int) (string, error) -func GetLines(s string) []string -func HasLowerCase(str string) bool -func HasUpperCase(str string) bool -func HasWhitespace(str string) bool -func HasWhitespaceOnly(str string) bool -func InRange(value interface{}, left interface{}, right interface{}) bool -func InRangeFloat32(value, left, right float32) bool -func InRangeFloat64(value, left, right float64) bool -func InRangeInt(value, left, right interface{}) bool -func IsASCII(str string) bool -func IsAlpha(str string) bool -func IsAlphanumeric(str string) bool -func IsBase64(str string) bool -func IsByteLength(str string, min, max int) bool -func IsCIDR(str string) bool -func IsCRC32(str string) bool -func IsCRC32b(str string) bool -func IsCreditCard(str string) bool -func IsDNSName(str string) bool -func IsDataURI(str string) bool -func IsDialString(str string) bool -func IsDivisibleBy(str, num string) bool -func IsEmail(str string) bool -func IsExistingEmail(email string) bool -func IsFilePath(str string) (bool, int) -func IsFloat(str string) bool -func IsFullWidth(str string) bool -func IsHalfWidth(str string) bool -func IsHash(str string, algorithm string) bool -func IsHexadecimal(str string) bool -func IsHexcolor(str string) bool -func IsHost(str string) bool -func IsIP(str string) bool -func IsIPv4(str string) bool -func IsIPv6(str string) bool -func IsISBN(str string, version int) bool -func IsISBN10(str string) bool -func IsISBN13(str string) bool -func IsISO3166Alpha2(str string) bool -func IsISO3166Alpha3(str string) bool -func IsISO4217(str string) bool -func IsISO693Alpha2(str string) bool -func IsISO693Alpha3b(str string) bool -func IsIn(str string, params ...string) bool -func IsInRaw(str string, params ...string) bool -func IsInt(str string) bool -func IsJSON(str string) bool -func IsLatitude(str string) bool -func IsLongitude(str string) bool -func IsLowerCase(str string) bool -func IsMAC(str string) bool -func IsMD4(str string) bool -func IsMD5(str string) bool -func IsMagnetURI(str string) bool -func IsMongoID(str string) bool -func IsMultibyte(str string) bool -func IsNatural(value float64) bool -func IsNegative(value float64) bool -func IsNonNegative(value float64) bool -func IsNonPositive(value float64) bool -func IsNotNull(str string) bool -func IsNull(str string) bool -func IsNumeric(str string) bool -func IsPort(str string) bool -func IsPositive(value float64) bool -func IsPrintableASCII(str string) bool -func IsRFC3339(str string) bool -func IsRFC3339WithoutZone(str string) bool -func IsRGBcolor(str string) bool -func IsRegex(str string) bool -func IsRequestURI(rawurl string) bool -func IsRequestURL(rawurl string) bool -func IsRipeMD128(str string) bool -func IsRipeMD160(str string) bool -func IsRsaPub(str string, params ...string) bool -func IsRsaPublicKey(str string, keylen int) bool -func IsSHA1(str string) bool -func IsSHA256(str string) bool -func 
IsSHA384(str string) bool -func IsSHA512(str string) bool -func IsSSN(str string) bool -func IsSemver(str string) bool -func IsTiger128(str string) bool -func IsTiger160(str string) bool -func IsTiger192(str string) bool -func IsTime(str string, format string) bool -func IsType(v interface{}, params ...string) bool -func IsURL(str string) bool -func IsUTFDigit(str string) bool -func IsUTFLetter(str string) bool -func IsUTFLetterNumeric(str string) bool -func IsUTFNumeric(str string) bool -func IsUUID(str string) bool -func IsUUIDv3(str string) bool -func IsUUIDv4(str string) bool -func IsUUIDv5(str string) bool -func IsULID(str string) bool -func IsUnixTime(str string) bool -func IsUpperCase(str string) bool -func IsVariableWidth(str string) bool -func IsWhole(value float64) bool -func LeftTrim(str, chars string) string -func Map(array []interface{}, iterator ResultIterator) []interface{} -func Matches(str, pattern string) bool -func MaxStringLength(str string, params ...string) bool -func MinStringLength(str string, params ...string) bool -func NormalizeEmail(str string) (string, error) -func PadBoth(str string, padStr string, padLen int) string -func PadLeft(str string, padStr string, padLen int) string -func PadRight(str string, padStr string, padLen int) string -func PrependPathToErrors(err error, path string) error -func Range(str string, params ...string) bool -func RemoveTags(s string) string -func ReplacePattern(str, pattern, replace string) string -func Reverse(s string) string -func RightTrim(str, chars string) string -func RuneLength(str string, params ...string) bool -func SafeFileName(str string) string -func SetFieldsRequiredByDefault(value bool) -func SetNilPtrAllowedByRequired(value bool) -func Sign(value float64) float64 -func StringLength(str string, params ...string) bool -func StringMatches(s string, params ...string) bool -func StripLow(str string, keepNewLines bool) string -func ToBoolean(str string) (bool, error) -func ToFloat(str string) (float64, error) -func ToInt(value interface{}) (res int64, err error) -func ToJSON(obj interface{}) (string, error) -func ToString(obj interface{}) string -func Trim(str, chars string) string -func Truncate(str string, length int, ending string) string -func TruncatingErrorf(str string, args ...interface{}) error -func UnderscoreToCamelCase(s string) string -func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error) -func ValidateStruct(s interface{}) (bool, error) -func WhiteList(str, chars string) string -type ConditionIterator -type CustomTypeValidator -type Error -func (e Error) Error() string -type Errors -func (es Errors) Error() string -func (es Errors) Errors() []error -type ISO3166Entry -type ISO693Entry -type InterfaceParamValidator -type Iterator -type ParamValidator -type ResultIterator -type UnsupportedTypeError -func (e *UnsupportedTypeError) Error() string -type Validator -``` - -#### Examples -###### IsURL -```go -println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) -``` -###### IsType -```go -println(govalidator.IsType("Bob", "string")) -println(govalidator.IsType(1, "int")) -i := 1 -println(govalidator.IsType(&i, "*int")) -``` - -IsType can be used through the tag `type` which is essential for map validation: -```go -type User struct { - Name string `valid:"type(string)"` - Age int `valid:"type(int)"` - Meta interface{} `valid:"type(string)"` -} -result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"}) -if err != nil { - println("error: " + 
-}
-println(result)
-```
-###### ToString
-```go
-type User struct {
-    FirstName string
-    LastName  string
-}
-
-str := govalidator.ToString(&User{"John", "Juan"})
-println(str)
-```
-###### Each, Map, Filter, Count for slices
-Each iterates over the slice/array and calls Iterator for every item
-```go
-data := []interface{}{1, 2, 3, 4, 5}
-var fn govalidator.Iterator = func(value interface{}, index int) {
-    println(value.(int))
-}
-govalidator.Each(data, fn)
-```
-```go
-data := []interface{}{1, 2, 3, 4, 5}
-var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
-    return value.(int) * 3
-}
-_ = govalidator.Map(data, fn) // result = []interface{}{3, 6, 9, 12, 15}
-```
-```go
-data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
-var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
-    return value.(int)%2 == 0
-}
-_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
-_ = govalidator.Count(data, fn) // result = 5
-```
-###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
-If you want to validate structs, you can use the tag `valid` for any field in your structure. All validators used with this field in one tag are separated by commas. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
-```go
-govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
-    return str == "duck"
-})
-```
-For completely custom validators (interface-based), see below.
-
-Here is a list of available validators for struct fields (validator - used function):
-```go
-"email": IsEmail,
-"url": IsURL,
-"dialstring": IsDialString,
-"requrl": IsRequestURL,
-"requri": IsRequestURI,
-"alpha": IsAlpha,
-"utfletter": IsUTFLetter,
-"alphanum": IsAlphanumeric,
-"utfletternum": IsUTFLetterNumeric,
-"numeric": IsNumeric,
-"utfnumeric": IsUTFNumeric,
-"utfdigit": IsUTFDigit,
-"hexadecimal": IsHexadecimal,
-"hexcolor": IsHexcolor,
-"rgbcolor": IsRGBcolor,
-"lowercase": IsLowerCase,
-"uppercase": IsUpperCase,
-"int": IsInt,
-"float": IsFloat,
-"null": IsNull,
-"uuid": IsUUID,
-"uuidv3": IsUUIDv3,
-"uuidv4": IsUUIDv4,
-"uuidv5": IsUUIDv5,
-"creditcard": IsCreditCard,
-"isbn10": IsISBN10,
-"isbn13": IsISBN13,
-"json": IsJSON,
-"multibyte": IsMultibyte,
-"ascii": IsASCII,
-"printableascii": IsPrintableASCII,
-"fullwidth": IsFullWidth,
-"halfwidth": IsHalfWidth,
-"variablewidth": IsVariableWidth,
-"base64": IsBase64,
-"datauri": IsDataURI,
-"ip": IsIP,
-"port": IsPort,
-"ipv4": IsIPv4,
-"ipv6": IsIPv6,
-"dns": IsDNSName,
-"host": IsHost,
-"mac": IsMAC,
-"latitude": IsLatitude,
-"longitude": IsLongitude,
-"ssn": IsSSN,
-"semver": IsSemver,
-"rfc3339": IsRFC3339,
-"rfc3339WithoutZone": IsRFC3339WithoutZone,
-"ISO3166Alpha2": IsISO3166Alpha2,
-"ISO3166Alpha3": IsISO3166Alpha3,
-"ulid": IsULID,
-```
-Validators with parameters
-
-```go
-"range(min|max)": Range,
-"length(min|max)": ByteLength,
-"runelength(min|max)": RuneLength,
-"stringlength(min|max)": StringLength,
-"matches(pattern)": StringMatches,
-"in(string1|string2|...|stringN)": IsIn,
-"rsapub(keylength)": IsRsaPub,
-"minstringlength(int)": MinStringLength,
-"maxstringlength(int)": MaxStringLength,
-```
-Validators with parameters for any type
-
-```go
-"type(type)": IsType,
-```
-
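For instance, parameterized tags combine with plain ones inside a single `valid` tag. A minimal sketch of this (the struct, field names, and values here are illustrative, not part of the library):

```go
type Signup struct {
    // "required" plus the parameterized "stringlength(min|max)" validator
    Username string `valid:"required,stringlength(3|20)"`
    // "in(...)" restricts the value to a fixed set of strings
    Role string `valid:"in(admin|editor|viewer)"`
}

ok, err := govalidator.ValidateStruct(Signup{Username: "bob", Role: "editor"})
// ok == true and err == nil for this input
```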
-And here is a fuller example of usage:
-```go
-type Post struct {
-    Title    string `valid:"alphanum,required"`
-    Message  string `valid:"duck,ascii"`
-    Message2 string `valid:"animal(dog)"`
-    AuthorIP string `valid:"ipv4"`
-    Date     string `valid:"-"`
-}
-post := &Post{
-    Title:    "My Example Post",
-    Message:  "duck",
-    Message2: "dog",
-    AuthorIP: "123.234.54.3",
-}
-
-// Add your own struct validation tags
-govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
-    return str == "duck"
-})
-
-// Add your own struct validation tags with parameter
-govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
-    species := params[0]
-    return str == species
-})
-govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")
-
-result, err := govalidator.ValidateStruct(post)
-if err != nil {
-    println("error: " + err.Error())
-}
-println(result)
-```
-###### ValidateMap [#338](https://github.com/asaskevich/govalidator/pull/338)
-If you want to validate maps, you can use the map to be validated and a validation map that contains the same tags used in ValidateStruct; both maps have to be in the form `map[string]interface{}`.
-
-So here is a small example of usage:
-```go
-var mapTemplate = map[string]interface{}{
-    "name":       "required,alpha",
-    "family":     "required,alpha",
-    "email":      "required,email",
-    "cell-phone": "numeric",
-    "address": map[string]interface{}{
-        "line1":       "required,alphanum",
-        "line2":       "alphanum",
-        "postal-code": "numeric",
-    },
-}
-
-var inputMap = map[string]interface{}{
-    "name":   "Bob",
-    "family": "Smith",
-    "email":  "foo@bar.baz",
-    "address": map[string]interface{}{
-        "line1":       "",
-        "line2":       "",
-        "postal-code": "",
-    },
-}
-
-result, err := govalidator.ValidateMap(inputMap, mapTemplate)
-if err != nil {
-    println("error: " + err.Error())
-}
-println(result)
-```
-
-###### WhiteList
-```go
-// Remove all characters from string ignoring characters between "a" and "z"
-println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
-```
-
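`WhiteList` keeps only the characters matching the pattern; its counterpart `BlackList` (listed above) strips them instead. A small sketch, where the expected result is an assumption based on that described behavior:

```go
// Strip every digit instead of keeping the letters
println(govalidator.BlackList("a3a43a5a4a3a2a23a4a5a4a3a4", "0-9") == "aaaaaaaaaaaa")
```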
-###### Custom validation functions
-Custom validation using your own domain-specific validators is also available - here's an example of how to use it:
-```go
-import "github.com/asaskevich/govalidator"
-
-type CustomByteArray [6]byte // custom types are supported and can be validated
-
-type StructWithCustomByteArray struct {
-    ID              CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
-    Email           string          `valid:"email"`
-    CustomMinLength int             `valid:"-"`
-}
-
-govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool {
-    switch v := context.(type) { // you can type switch on the context interface being validated
-    case StructWithCustomByteArray:
-        // you can check and validate against some other field in the context,
-        // return early or not validate against the context at all – your choice
-    case SomeOtherType:
-        // ...
-    default:
-        // expecting some other type? Throw/panic here or continue
-    }
-
-    switch v := i.(type) { // type switch on the struct field being validated
-    case CustomByteArray:
-        for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
-            if e != 0 {
-                return true
-            }
-        }
-    }
-    return false
-})
-govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool {
-    switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
-    case StructWithCustomByteArray:
-        return len(v.ID) >= v.CustomMinLength
-    }
-    return false
-})
-```
-
-###### Loop over Error()
-By default .Error() returns all errors in a single string. To access each error you can do this:
-```go
-if err != nil {
-    errs := err.(govalidator.Errors).Errors()
-    for _, e := range errs {
-        fmt.Println(e.Error())
-    }
-}
-```
-
-###### Custom error messages
-Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
-```go
-type Ticket struct {
-    Id        int64  `json:"id"`
-    FirstName string `json:"firstname" valid:"required~First name is blank"`
-}
-```
-
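A quick sketch of how the custom message surfaces; the `Ticket` literal is illustrative:

```go
_, err := govalidator.ValidateStruct(Ticket{Id: 1}) // FirstName is empty, so "required" fails
if err != nil {
    fmt.Println(err) // prints "First name is blank" instead of the default message
}
```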
-#### Notes
-Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
-Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
-
-#### Support
-If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
-
-#### What to contribute
-If you don't know what to do, there are some features and functions that need to be done:
-
-- [ ] Refactor code
-- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
-- [ ] Create an actual list of contributors and projects that are currently using this package
-- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
-- [ ] Update the actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
-- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
-- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc.
-- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
-- [ ] Implement fuzzing testing
-- [ ] Implement some struct/map/array utilities
-- [ ] Implement map/array validation
-- [ ] Implement benchmarking
-- [ ] Implement a batch of examples
-- [ ] Look at forks for new features and fixes
-
-#### Advice
-Feel free to create what you want, but keep in mind when you implement new features:
-- Code must be clear and readable, and names of variables/constants must clearly describe what they are doing
-- Public functions must be documented and described in the source file and added to the list of available functions in README.md
-- There must be unit tests for any new functions and improvements
-
-## Credits
-### Contributors
-
-This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
-
-#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
-* [Daniel Lohse](https://github.com/annismckenzie)
-* [Attila Oláh](https://github.com/attilaolah)
-* [Daniel Korner](https://github.com/Dadie)
-* [Steven Wilkin](https://github.com/stevenwilkin)
-* [Deiwin Sarjas](https://github.com/deiwin)
-* [Noah Shibley](https://github.com/slugmobile)
-* [Nathan Davies](https://github.com/nathj07)
-* [Matt Sanford](https://github.com/mzsanford)
-* [Simon ccl1115](https://github.com/ccl1115)
-
-### Backers
-
-Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
-
-### Sponsors
-
-Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
-
-## License
-[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go
deleted file mode 100644
index 3e1da7cb48..0000000000
--- a/vendor/github.com/asaskevich/govalidator/arrays.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package govalidator
-
-// Iterator is the function that accepts an element of a slice/array and its index
-type Iterator func(interface{}, int)
-
-// ResultIterator is the function that accepts an element of a slice/array and its index and returns any result
-type ResultIterator func(interface{}, int) interface{}
-
-// ConditionIterator is the function that accepts an element of a slice/array and its index and returns a boolean
-type ConditionIterator func(interface{}, int) bool
-
-// ReduceIterator is the function that accepts two elements of a slice/array and returns the result of merging those values
-type ReduceIterator func(interface{}, interface{}) interface{}
-
-// Some validates that any item of the array corresponds to ConditionIterator. Returns boolean.
-func Some(array []interface{}, iterator ConditionIterator) bool {
-    res := false
-    for index, data := range array {
-        res = res || iterator(data, index)
-    }
-    return res
-}
-
-// Every validates that every item of the array corresponds to ConditionIterator. Returns boolean.
-func Every(array []interface{}, iterator ConditionIterator) bool {
-    res := true
-    for index, data := range array {
-        res = res && iterator(data, index)
-    }
-    return res
-}
-
-// Reduce boils down a list of values into a single value by ReduceIterator
-func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} {
-    for _, data := range array {
-        initialValue = iterator(initialValue, data)
-    }
-    return initialValue
-}
-
-// Each iterates over the slice and applies Iterator to every item
-func Each(array []interface{}, iterator Iterator) {
-    for index, data := range array {
-        iterator(data, index)
-    }
-}
-
-// Map iterates over the slice and applies ResultIterator to every item. Returns a new slice as a result.
-func Map(array []interface{}, iterator ResultIterator) []interface{} {
-    var result = make([]interface{}, len(array))
-    for index, data := range array {
-        result[index] = iterator(data, index)
-    }
-    return result
-}
-
-// Find iterates over the slice and applies ConditionIterator to every item. Returns the first item that meets ConditionIterator, or nil otherwise.
-func Find(array []interface{}, iterator ConditionIterator) interface{} {
-    for index, data := range array {
-        if iterator(data, index) {
-            return data
-        }
-    }
-    return nil
-}
-
-// Filter iterates over the slice and applies ConditionIterator to every item. Returns a new slice.
-func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
-    var result = make([]interface{}, 0)
-    for index, data := range array {
-        if iterator(data, index) {
-            result = append(result, data)
-        }
-    }
-    return result
-}
-
-// Count iterates over the slice and applies ConditionIterator to every item. Returns the count of items that meet ConditionIterator.
-func Count(array []interface{}, iterator ConditionIterator) int {
-    count := 0
-    for index, data := range array {
-        if iterator(data, index) {
-            count = count + 1
-        }
-    }
-    return count
-}
diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go
deleted file mode 100644
index d68e990fc2..0000000000
--- a/vendor/github.com/asaskevich/govalidator/converter.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package govalidator
-
-import (
-    "encoding/json"
-    "fmt"
-    "reflect"
-    "strconv"
-)
-
-// ToString converts the input to a string.
-func ToString(obj interface{}) string {
-    res := fmt.Sprintf("%v", obj)
-    return res
-}
-
-// ToJSON converts the input to a valid JSON string.
-func ToJSON(obj interface{}) (string, error) {
-    res, err := json.Marshal(obj)
-    if err != nil {
-        res = []byte("")
-    }
-    return string(res), err
-}
-
-// ToFloat converts the input (a numeric type or a string) to a float64, or 0.0 and an error if the input is not convertible to a float.
-func ToFloat(value interface{}) (res float64, err error) {
-    val := reflect.ValueOf(value)
-
-    switch value.(type) {
-    case int, int8, int16, int32, int64:
-        res = float64(val.Int())
-    case uint, uint8, uint16, uint32, uint64:
-        res = float64(val.Uint())
-    case float32, float64:
-        res = val.Float()
-    case string:
-        res, err = strconv.ParseFloat(val.String(), 64)
-        if err != nil {
-            res = 0
-        }
-    default:
-        err = fmt.Errorf("ToFloat: unknown interface type %T", value)
-        res = 0
-    }
-
-    return
-}
-
-// ToInt converts the input string or any numeric type to an int64, or 0 and an error if the input is not an integer.
-func ToInt(value interface{}) (res int64, err error) {
-    val := reflect.ValueOf(value)
-
-    switch value.(type) {
-    case int, int8, int16, int32, int64:
-        res = val.Int()
-    case uint, uint8, uint16, uint32, uint64:
-        res = int64(val.Uint())
-    case float32, float64:
-        res = int64(val.Float())
-    case string:
-        if IsInt(val.String()) {
-            res, err = strconv.ParseInt(val.String(), 0, 64)
-            if err != nil {
-                res = 0
-            }
-        } else {
-            err = fmt.Errorf("ToInt: invalid numeric format %v", value)
-            res = 0
-        }
-    default:
-        err = fmt.Errorf("ToInt: unknown interface type %T", value)
-        res = 0
-    }
-
-    return
-}
-
-// ToBoolean converts the input string to a boolean.
-func ToBoolean(str string) (bool, error) {
-    return strconv.ParseBool(str)
-}
diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go
deleted file mode 100644
index 55dce62dc8..0000000000
--- a/vendor/github.com/asaskevich/govalidator/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package govalidator
-
-// A package of validators and sanitizers for strings, structures and collections.
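For quick reference, a sketch of how the converters above behave on typical inputs (expected results shown as comments, derived from the code just listed):

```go
f, _ := govalidator.ToFloat("3.14")                // 3.14
n, _ := govalidator.ToInt("42")                    // 42
b, _ := govalidator.ToBoolean("true")              // true, via strconv.ParseBool
s := govalidator.ToString(struct{ A int }{1})      // "{1}"
j, _ := govalidator.ToJSON(map[string]int{"a": 1}) // `{"a":1}`
```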
diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go
deleted file mode 100644
index 1da2336f47..0000000000
--- a/vendor/github.com/asaskevich/govalidator/error.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package govalidator
-
-import (
-    "sort"
-    "strings"
-)
-
-// Errors is an array of multiple errors and conforms to the error interface.
-type Errors []error
-
-// Errors returns itself.
-func (es Errors) Errors() []error {
-    return es
-}
-
-func (es Errors) Error() string {
-    var errs []string
-    for _, e := range es {
-        errs = append(errs, e.Error())
-    }
-    sort.Strings(errs)
-    return strings.Join(errs, ";")
-}
-
-// Error encapsulates a name, an error and whether there's a custom error message or not.
-type Error struct {
-    Name                     string
-    Err                      error
-    CustomErrorMessageExists bool
-
-    // Validator indicates the name of the validator that failed
-    Validator string
-    Path      []string
-}
-
-func (e Error) Error() string {
-    if e.CustomErrorMessageExists {
-        return e.Err.Error()
-    }
-
-    errName := e.Name
-    if len(e.Path) > 0 {
-        errName = strings.Join(append(e.Path, e.Name), ".")
-    }
-
-    return errName + ": " + e.Err.Error()
-}
diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go
deleted file mode 100644
index 5041d9e868..0000000000
--- a/vendor/github.com/asaskevich/govalidator/numerics.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package govalidator
-
-import (
-    "math"
-)
-
-// Abs returns the absolute value of the number
-func Abs(value float64) float64 {
-    return math.Abs(value)
-}
-
-// Sign returns the signum of the number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise
-func Sign(value float64) float64 {
-    if value > 0 {
-        return 1
-    } else if value < 0 {
-        return -1
-    } else {
-        return 0
-    }
-}
-
-// IsNegative returns true if value < 0
-func IsNegative(value float64) bool {
-    return value < 0
-}
-
-// IsPositive returns true if value > 0
-func IsPositive(value float64) bool {
-    return value > 0
-}
-
-// IsNonNegative returns true if value >= 0
-func IsNonNegative(value float64) bool {
-    return value >= 0
-}
-
-// IsNonPositive returns true if value <= 0
-func IsNonPositive(value float64) bool {
-    return value <= 0
-}
-
-// InRangeInt returns true if value lies between left and right border
-func InRangeInt(value, left, right interface{}) bool {
-    value64, _ := ToInt(value)
-    left64, _ := ToInt(left)
-    right64, _ := ToInt(right)
-    if left64 > right64 {
-        left64, right64 = right64, left64
-    }
-    return value64 >= left64 && value64 <= right64
-}
-
-// InRangeFloat32 returns true if value lies between left and right border
-func InRangeFloat32(value, left, right float32) bool {
-    if left > right {
-        left, right = right, left
-    }
-    return value >= left && value <= right
-}
-
-// InRangeFloat64 returns true if value lies between left and right border
-func InRangeFloat64(value, left, right float64) bool {
-    if left > right {
-        left, right = right, left
-    }
-    return value >= left && value <= right
-}
-
-// InRange returns true if value lies between left and right border, generic type to handle int, float32, float64 and string.
-// All types must be the same type.
-// False if value doesn't lie in range or if it incompatible or not comparable -func InRange(value interface{}, left interface{}, right interface{}) bool { - switch value.(type) { - case int: - intValue, _ := ToInt(value) - intLeft, _ := ToInt(left) - intRight, _ := ToInt(right) - return InRangeInt(intValue, intLeft, intRight) - case float32, float64: - intValue, _ := ToFloat(value) - intLeft, _ := ToFloat(left) - intRight, _ := ToFloat(right) - return InRangeFloat64(intValue, intLeft, intRight) - case string: - return value.(string) >= left.(string) && value.(string) <= right.(string) - default: - return false - } -} - -// IsWhole returns true if value is whole number -func IsWhole(value float64) bool { - return math.Remainder(value, 1) == 0 -} - -// IsNatural returns true if value is natural number (positive and whole) -func IsNatural(value float64) bool { - return IsWhole(value) && IsPositive(value) -} diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go deleted file mode 100644 index bafc3765ea..0000000000 --- a/vendor/github.com/asaskevich/govalidator/patterns.go +++ /dev/null @@ -1,113 +0,0 @@ -package govalidator - -import "regexp" - -// Basic regular expressions for validating strings -const ( - Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" - CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" - ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" - ISBN13 string = "^(?:[0-9]{13})$" - UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" - UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" - Alpha string = "^[a-zA-Z]+$" - Alphanumeric string = "^[a-zA-Z0-9]+$" - Numeric string = "^[0-9]+$" - Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" - Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" - Hexadecimal string = "^[0-9a-fA-F]+$" - Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" - 
RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" - ASCII string = "^[\x00-\x7F]+$" - Multibyte string = "[^\x00-\x7F]" - FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" - PrintableASCII string = "^[\x20-\x7E]+$" - DataURI string = "^data:.+\\/(.+);base64$" - MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$" - Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" - Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" - DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` - IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` - URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` - URLUsername string = `(\S+(:\S*)?@)` - URLPath string = `((\/|\?|#)[^\s]*)` - URLPort string = `(:(\d{1,5}))` - URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` - URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` - URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` - SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` - WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` - UnixPath string = `^(/[^/\x00]*)+/?$` - WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` - UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$` - Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" - tagName string = "valid" - hasLowerCase string = ".*[[:lower:]]" - hasUpperCase string = ".*[[:upper:]]" - hasWhitespace string = ".*[[:space:]]" - hasWhitespaceOnly string = "^[[:space:]]+$" - IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" - IMSI string = "^\\d{14,15}$" - E164 string = `^\+?[1-9]\d{1,14}$` -) - -// Used by IsFilePath func -const ( - // Unknown is unresolved OS type - Unknown = iota - // Win is Windows type - Win - // Unix is *nix OS types - Unix -) - -var ( - userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") - hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") - 
userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") - rxEmail = regexp.MustCompile(Email) - rxCreditCard = regexp.MustCompile(CreditCard) - rxISBN10 = regexp.MustCompile(ISBN10) - rxISBN13 = regexp.MustCompile(ISBN13) - rxUUID3 = regexp.MustCompile(UUID3) - rxUUID4 = regexp.MustCompile(UUID4) - rxUUID5 = regexp.MustCompile(UUID5) - rxUUID = regexp.MustCompile(UUID) - rxAlpha = regexp.MustCompile(Alpha) - rxAlphanumeric = regexp.MustCompile(Alphanumeric) - rxNumeric = regexp.MustCompile(Numeric) - rxInt = regexp.MustCompile(Int) - rxFloat = regexp.MustCompile(Float) - rxHexadecimal = regexp.MustCompile(Hexadecimal) - rxHexcolor = regexp.MustCompile(Hexcolor) - rxRGBcolor = regexp.MustCompile(RGBcolor) - rxASCII = regexp.MustCompile(ASCII) - rxPrintableASCII = regexp.MustCompile(PrintableASCII) - rxMultibyte = regexp.MustCompile(Multibyte) - rxFullWidth = regexp.MustCompile(FullWidth) - rxHalfWidth = regexp.MustCompile(HalfWidth) - rxBase64 = regexp.MustCompile(Base64) - rxDataURI = regexp.MustCompile(DataURI) - rxMagnetURI = regexp.MustCompile(MagnetURI) - rxLatitude = regexp.MustCompile(Latitude) - rxLongitude = regexp.MustCompile(Longitude) - rxDNSName = regexp.MustCompile(DNSName) - rxURL = regexp.MustCompile(URL) - rxSSN = regexp.MustCompile(SSN) - rxWinPath = regexp.MustCompile(WinPath) - rxUnixPath = regexp.MustCompile(UnixPath) - rxARWinPath = regexp.MustCompile(WinARPath) - rxARUnixPath = regexp.MustCompile(UnixARPath) - rxSemver = regexp.MustCompile(Semver) - rxHasLowerCase = regexp.MustCompile(hasLowerCase) - rxHasUpperCase = regexp.MustCompile(hasUpperCase) - rxHasWhitespace = regexp.MustCompile(hasWhitespace) - rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) - rxIMEI = regexp.MustCompile(IMEI) - rxIMSI = regexp.MustCompile(IMSI) - rxE164 = regexp.MustCompile(E164) -) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go deleted file mode 100644 index c573abb51a..0000000000 --- a/vendor/github.com/asaskevich/govalidator/types.go +++ /dev/null @@ -1,656 +0,0 @@ -package govalidator - -import ( - "reflect" - "regexp" - "sort" - "sync" -) - -// Validator is a wrapper for a validator function that returns bool and accepts string. -type Validator func(str string) bool - -// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. -// The second parameter should be the context (in the case of validating a struct: the whole object being validated). -type CustomTypeValidator func(i interface{}, o interface{}) bool - -// ParamValidator is a wrapper for validator functions that accept additional parameters. -type ParamValidator func(str string, params ...string) bool - -// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value -type InterfaceParamValidator func(in interface{}, params ...string) bool -type tagOptionsMap map[string]tagOption - -func (t tagOptionsMap) orderedKeys() []string { - var keys []string - for k := range t { - keys = append(keys, k) - } - - sort.Slice(keys, func(a, b int) bool { - return t[keys[a]].order < t[keys[b]].order - }) - - return keys -} - -type tagOption struct { - name string - customErrorMessage string - order int -} - -// UnsupportedTypeError is a wrapper for reflect.Type -type UnsupportedTypeError struct { - Type reflect.Type -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. 
-type stringValues []reflect.Value - -// InterfaceParamTagMap is a map of functions accept variants parameters for an interface value -var InterfaceParamTagMap = map[string]InterfaceParamValidator{ - "type": IsType, -} - -// InterfaceParamTagRegexMap maps interface param tags to their respective regexes. -var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{ - "type": regexp.MustCompile(`^type\((.*)\)$`), -} - -// ParamTagMap is a map of functions accept variants parameters -var ParamTagMap = map[string]ParamValidator{ - "length": ByteLength, - "range": Range, - "runelength": RuneLength, - "stringlength": StringLength, - "matches": StringMatches, - "in": IsInRaw, - "rsapub": IsRsaPub, - "minstringlength": MinStringLength, - "maxstringlength": MaxStringLength, -} - -// ParamTagRegexMap maps param tags to their respective regexes. -var ParamTagRegexMap = map[string]*regexp.Regexp{ - "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), - "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), - "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), - "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), - "in": regexp.MustCompile(`^in\((.*)\)`), - "matches": regexp.MustCompile(`^matches\((.+)\)$`), - "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), - "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"), - "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"), -} - -type customTypeTagMap struct { - validators map[string]CustomTypeValidator - - sync.RWMutex -} - -func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) { - tm.RLock() - defer tm.RUnlock() - v, ok := tm.validators[name] - return v, ok -} - -func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) { - tm.Lock() - defer tm.Unlock() - tm.validators[name] = ctv -} - -// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function. -// Use this to validate compound or custom types that need to be handled as a whole, e.g. -// `type UUID [16]byte` (this would be handled as an array of bytes). -var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)} - -// TagMap is a map of functions, that can be used as tags for ValidateStruct function. 
-var TagMap = map[string]Validator{ - "email": IsEmail, - "url": IsURL, - "dialstring": IsDialString, - "requrl": IsRequestURL, - "requri": IsRequestURI, - "alpha": IsAlpha, - "utfletter": IsUTFLetter, - "alphanum": IsAlphanumeric, - "utfletternum": IsUTFLetterNumeric, - "numeric": IsNumeric, - "utfnumeric": IsUTFNumeric, - "utfdigit": IsUTFDigit, - "hexadecimal": IsHexadecimal, - "hexcolor": IsHexcolor, - "rgbcolor": IsRGBcolor, - "lowercase": IsLowerCase, - "uppercase": IsUpperCase, - "int": IsInt, - "float": IsFloat, - "null": IsNull, - "notnull": IsNotNull, - "uuid": IsUUID, - "uuidv3": IsUUIDv3, - "uuidv4": IsUUIDv4, - "uuidv5": IsUUIDv5, - "creditcard": IsCreditCard, - "isbn10": IsISBN10, - "isbn13": IsISBN13, - "json": IsJSON, - "multibyte": IsMultibyte, - "ascii": IsASCII, - "printableascii": IsPrintableASCII, - "fullwidth": IsFullWidth, - "halfwidth": IsHalfWidth, - "variablewidth": IsVariableWidth, - "base64": IsBase64, - "datauri": IsDataURI, - "ip": IsIP, - "port": IsPort, - "ipv4": IsIPv4, - "ipv6": IsIPv6, - "dns": IsDNSName, - "host": IsHost, - "mac": IsMAC, - "latitude": IsLatitude, - "longitude": IsLongitude, - "ssn": IsSSN, - "semver": IsSemver, - "rfc3339": IsRFC3339, - "rfc3339WithoutZone": IsRFC3339WithoutZone, - "ISO3166Alpha2": IsISO3166Alpha2, - "ISO3166Alpha3": IsISO3166Alpha3, - "ISO4217": IsISO4217, - "IMEI": IsIMEI, - "ulid": IsULID, -} - -// ISO3166Entry stores country codes -type ISO3166Entry struct { - EnglishShortName string - FrenchShortName string - Alpha2Code string - Alpha3Code string - Numeric string -} - -//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" -var ISO3166List = []ISO3166Entry{ - {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, - {"Albania", "Albanie (l')", "AL", "ALB", "008"}, - {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, - {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, - {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, - {"Andorra", "Andorre (l')", "AD", "AND", "020"}, - {"Angola", "Angola (l')", "AO", "AGO", "024"}, - {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, - {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, - {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, - {"Australia", "Australie (l')", "AU", "AUS", "036"}, - {"Austria", "Autriche (l')", "AT", "AUT", "040"}, - {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, - {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, - {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, - {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, - {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, - {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, - {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, - {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, - {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, - {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, - {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, - {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, - {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, - {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, - {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, - {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, - {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"}, - {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, - {"Bulgaria", "Bulgarie (la)", "BG", 
"BGR", "100"}, - {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, - {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, - {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, - {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, - {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, - {"Canada", "Canada (le)", "CA", "CAN", "124"}, - {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, - {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, - {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, - {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, - {"Chad", "Tchad (le)", "TD", "TCD", "148"}, - {"Chile", "Chili (le)", "CL", "CHL", "152"}, - {"China", "Chine (la)", "CN", "CHN", "156"}, - {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, - {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, - {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, - {"Colombia", "Colombie (la)", "CO", "COL", "170"}, - {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, - {"Mayotte", "Mayotte", "YT", "MYT", "175"}, - {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, - {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, - {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, - {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, - {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, - {"Cuba", "Cuba", "CU", "CUB", "192"}, - {"Cyprus", "Chypre", "CY", "CYP", "196"}, - {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, - {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, - {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, - {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, - {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, - {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, - {"El Salvador", "El Salvador", "SV", "SLV", "222"}, - {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, - {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, - {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, - {"Estonia", "Estonie (l')", "EE", "EST", "233"}, - {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, - {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, - {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, - {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, - {"Finland", "Finlande (la)", "FI", "FIN", "246"}, - {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, - {"France", "France (la)", "FR", "FRA", "250"}, - {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, - {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, - {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, - {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, - {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, - {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, - {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, - {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, - {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, - {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, - {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, - {"Kiribati", "Kiribati", "KI", "KIR", "296"}, - {"Greece", "Grèce (la)", "GR", "GRC", "300"}, - {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, - {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, 
- {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, - {"Guam", "Guam", "GU", "GUM", "316"}, - {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, - {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, - {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, - {"Haiti", "Haïti", "HT", "HTI", "332"}, - {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, - {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, - {"Honduras", "Honduras (le)", "HN", "HND", "340"}, - {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, - {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, - {"Iceland", "Islande (l')", "IS", "ISL", "352"}, - {"India", "Inde (l')", "IN", "IND", "356"}, - {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, - {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, - {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, - {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, - {"Israel", "Israël", "IL", "ISR", "376"}, - {"Italy", "Italie (l')", "IT", "ITA", "380"}, - {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, - {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, - {"Japan", "Japon (le)", "JP", "JPN", "392"}, - {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, - {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, - {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, - {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, - {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, - {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, - {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, - {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, - {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, - {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, - {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, - {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, - {"Libya", "Libye (la)", "LY", "LBY", "434"}, - {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, - {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, - {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, - {"Macao", "Macao", "MO", "MAC", "446"}, - {"Madagascar", "Madagascar", "MG", "MDG", "450"}, - {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, - {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, - {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, - {"Mali", "Mali (le)", "ML", "MLI", "466"}, - {"Malta", "Malte", "MT", "MLT", "470"}, - {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, - {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, - {"Mauritius", "Maurice", "MU", "MUS", "480"}, - {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, - {"Monaco", "Monaco", "MC", "MCO", "492"}, - {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, - {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, - {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, - {"Montserrat", "Montserrat", "MS", "MSR", "500"}, - {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, - {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, - {"Oman", "Oman", "OM", "OMN", "512"}, - {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, - {"Nauru", "Nauru", "NR", "NRU", "520"}, - {"Nepal", "Népal (le)", "NP", "NPL", "524"}, - {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, - {"Curaçao", "Curaçao", "CW", "CUW", "531"}, - {"Aruba", "Aruba", "AW", "ABW", "533"}, - {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, - 
{"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, - {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, - {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, - {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, - {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, - {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, - {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, - {"Niue", "Niue", "NU", "NIU", "570"}, - {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, - {"Norway", "Norvège (la)", "NO", "NOR", "578"}, - {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, - {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, - {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, - {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, - {"Palau", "Palaos (les)", "PW", "PLW", "585"}, - {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, - {"Panama", "Panama (le)", "PA", "PAN", "591"}, - {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, - {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, - {"Peru", "Pérou (le)", "PE", "PER", "604"}, - {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, - {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, - {"Poland", "Pologne (la)", "PL", "POL", "616"}, - {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, - {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, - {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, - {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, - {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, - {"Réunion", "Réunion (La)", "RE", "REU", "638"}, - {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, - {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, - {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, - {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, - {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, - {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, - {"Anguilla", "Anguilla", "AI", "AIA", "660"}, - {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, - {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, - {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, - {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, - {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, - {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, - {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, - {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, - {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, - {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, - {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, - {"Singapore", "Singapour", "SG", "SGP", "702"}, - {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, - {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, - {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, - {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, - {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, - {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, - {"Spain", "Espagne (l')", "ES", "ESP", "724"}, - {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, - {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, - {"Western Sahara*", 
"Sahara occidental (le)*", "EH", "ESH", "732"}, - {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, - {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, - {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, - {"Sweden", "Suède (la)", "SE", "SWE", "752"}, - {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, - {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, - {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, - {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, - {"Togo", "Togo (le)", "TG", "TGO", "768"}, - {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, - {"Tonga", "Tonga (les)", "TO", "TON", "776"}, - {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, - {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, - {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, - {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, - {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, - {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, - {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, - {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, - {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, - {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, - {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, - {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, - {"Guernsey", "Guernesey", "GG", "GGY", "831"}, - {"Jersey", "Jersey", "JE", "JEY", "832"}, - {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, - {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, - {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, - {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, - {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, - {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, - {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, - {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, - {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, - {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, - {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, - {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, -} - -// ISO4217List is the list of ISO currency codes -var ISO4217List = []string{ - "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", - "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", - "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", - "DJF", "DKK", "DOP", "DZD", - "EGP", "ERN", "ETB", "EUR", - "FJD", "FKP", - "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", - "HKD", "HNL", "HRK", "HTG", "HUF", - "IDR", "ILS", "INR", "IQD", "IRR", "ISK", - "JMD", "JOD", "JPY", - "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", - "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", - "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", - "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", - "OMR", - "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", - "QAR", - "RON", "RSD", "RUB", "RWF", - "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL", - "THB", "TJS", "TMT", 
"TND", "TOP", "TRY", "TTD", "TWD", "TZS", - "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS", - "VEF", "VES", "VND", "VUV", - "WST", - "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", - "YER", - "ZAR", "ZMW", "ZWL", -} - -// ISO693Entry stores ISO language codes -type ISO693Entry struct { - Alpha3bCode string - Alpha2Code string - English string -} - -//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json -var ISO693List = []ISO693Entry{ - {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, - {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, - {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, - {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, - {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, - {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, - {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, - {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, - {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, - {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, - {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, - {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, - {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, - {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, - {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, - {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, - {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, - {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, - {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, - {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, - {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, - {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, - {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, - {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, - {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, - {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, - {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, - {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, - {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, - {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, - {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, - {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, - {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, - {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, - {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, - {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, - {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, - {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, - {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, - {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, - {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, - {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, - {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, - {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, - {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, - {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, - {Alpha3bCode: "fre", 
Alpha2Code: "fr", English: "French"}, - {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, - {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, - {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, - {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, - {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, - {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, - {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, - {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, - {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, - {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, - {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, - {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, - {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, - {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, - {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, - {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, - {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, - {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, - {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, - {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, - {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, - {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, - {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, - {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, - {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, - {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, - {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, - {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, - {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, - {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, - {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, - {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, - {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, - {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, - {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, - {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, - {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, - {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, - {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, - {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, - {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, - {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, - {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, - {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, - {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, - {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, - {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, - {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, - {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, - {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, - {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, - {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, - {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, - 
{Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, - {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, - {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, - {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, - {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, - {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, - {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, - {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, - {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, - {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, - {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, - {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, - {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, - {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, - {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, - {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, - {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, - {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, - {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, - {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, - {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, - {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, - {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, - {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, - {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, - {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, - {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, - {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, - {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, - {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, - {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, - {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, - {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, - {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, - {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, - {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, - {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, - {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, - {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, - {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, - {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, - {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, - {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, - {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, - {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, - {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, - {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, - {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, - {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, - {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, - {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, - {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, - {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, - {Alpha3bCode: "swe", Alpha2Code: "sv", 
English: "Swedish"}, - {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, - {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, - {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, - {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, - {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, - {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, - {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, - {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, - {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, - {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, - {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, - {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, - {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, - {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, - {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, - {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, - {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, - {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, - {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, - {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, - {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, - {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, - {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, - {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, - {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, - {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, - {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, - {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, - {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, - {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, -} diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go deleted file mode 100644 index f4c30f824a..0000000000 --- a/vendor/github.com/asaskevich/govalidator/utils.go +++ /dev/null @@ -1,270 +0,0 @@ -package govalidator - -import ( - "errors" - "fmt" - "html" - "math" - "path" - "regexp" - "strings" - "unicode" - "unicode/utf8" -) - -// Contains checks if the string contains the substring. -func Contains(str, substring string) bool { - return strings.Contains(str, substring) -} - -// Matches checks if string matches the pattern (pattern is regular expression) -// In case of error return false -func Matches(str, pattern string) bool { - match, _ := regexp.MatchString(pattern, str) - return match -} - -// LeftTrim trims characters from the left side of the input. -// If second argument is empty, it will remove leading spaces. -func LeftTrim(str, chars string) string { - if chars == "" { - return strings.TrimLeftFunc(str, unicode.IsSpace) - } - r, _ := regexp.Compile("^[" + chars + "]+") - return r.ReplaceAllString(str, "") -} - -// RightTrim trims characters from the right side of the input. -// If second argument is empty, it will remove trailing spaces. -func RightTrim(str, chars string) string { - if chars == "" { - return strings.TrimRightFunc(str, unicode.IsSpace) - } - r, _ := regexp.Compile("[" + chars + "]+$") - return r.ReplaceAllString(str, "") -} - -// Trim trims characters from both sides of the input. -// If second argument is empty, it will remove spaces. 
-
-// Trim trims characters from both sides of the input.
-// If second argument is empty, it will remove spaces.
-func Trim(str, chars string) string {
-	return LeftTrim(RightTrim(str, chars), chars)
-}
-
-// WhiteList removes characters that do not appear in the whitelist.
-func WhiteList(str, chars string) string {
-	pattern := "[^" + chars + "]+"
-	r, _ := regexp.Compile(pattern)
-	return r.ReplaceAllString(str, "")
-}
-
-// BlackList removes characters that appear in the blacklist.
-func BlackList(str, chars string) string {
-	pattern := "[" + chars + "]+"
-	r, _ := regexp.Compile(pattern)
-	return r.ReplaceAllString(str, "")
-}
-
-// StripLow removes characters with a numerical value < 32 and 127, mostly control characters.
-// If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
-func StripLow(str string, keepNewLines bool) string {
-	chars := ""
-	if keepNewLines {
-		chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
-	} else {
-		chars = "\x00-\x1F\x7F"
-	}
-	return BlackList(str, chars)
-}
-
-// ReplacePattern replaces regular expression pattern in string
-func ReplacePattern(str, pattern, replace string) string {
-	r, _ := regexp.Compile(pattern)
-	return r.ReplaceAllString(str, replace)
-}
-
-// Escape replaces <, >, & and " with HTML entities.
-var Escape = html.EscapeString
-
-func addSegment(inrune, segment []rune) []rune {
-	if len(segment) == 0 {
-		return inrune
-	}
-	if len(inrune) != 0 {
-		inrune = append(inrune, '_')
-	}
-	inrune = append(inrune, segment...)
-	return inrune
-}
-
-// UnderscoreToCamelCase converts from underscore separated form to camel case form.
-// Ex.: my_func => MyFunc
-func UnderscoreToCamelCase(s string) string {
-	return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
-}
-
-// CamelCaseToUnderscore converts from camel case form to underscore separated form.
-// Ex.: MyFunc => my_func
-func CamelCaseToUnderscore(str string) string {
-	var output []rune
-	var segment []rune
-	for _, r := range str {
-
-		// not treat number as separate segment
-		if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
-			output = addSegment(output, segment)
-			segment = nil
-		}
-		segment = append(segment, unicode.ToLower(r))
-	}
-	output = addSegment(output, segment)
-	return string(output)
-}
-
-// Reverse returns reversed string
-func Reverse(s string) string {
-	r := []rune(s)
-	for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
-		r[i], r[j] = r[j], r[i]
-	}
-	return string(r)
-}
-
-// GetLines splits string by "\n" and return array of lines
-func GetLines(s string) []string {
-	return strings.Split(s, "\n")
-}
-
-// GetLine returns specified line of multiline string
-func GetLine(s string, index int) (string, error) {
-	lines := GetLines(s)
-	if index < 0 || index >= len(lines) {
-		return "", errors.New("line index out of bounds")
-	}
-	return lines[index], nil
-}
-
-// RemoveTags removes all tags from HTML string
-func RemoveTags(s string) string {
-	return ReplacePattern(s, "<[^>]*>", "")
-}
-
-// SafeFileName returns safe string that can be used in file names
-func SafeFileName(str string) string {
-	name := strings.ToLower(str)
-	name = path.Clean(path.Base(name))
-	name = strings.Trim(name, " ")
-	separators, err := regexp.Compile(`[ &_=+:]`)
-	if err == nil {
-		name = separators.ReplaceAllString(name, "-")
-	}
-	legal, err := regexp.Compile(`[^[:alnum:]-.]`)
-	if err == nil {
-		name = legal.ReplaceAllString(name, "")
-	}
-	for strings.Contains(name, "--") {
-		name = strings.Replace(name, "--", "-", -1)
-	}
-	return name
-}
-
-// NormalizeEmail canonicalize an email address.
-// The local part of the email address is lowercased for all domains; the hostname is always lowercased and
-// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail).
-// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and
-// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are
-// normalized to @gmail.com.
-func NormalizeEmail(str string) (string, error) {
-	if !IsEmail(str) {
-		return "", fmt.Errorf("%s is not an email", str)
-	}
-	parts := strings.Split(str, "@")
-	parts[0] = strings.ToLower(parts[0])
-	parts[1] = strings.ToLower(parts[1])
-	if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
-		parts[1] = "gmail.com"
-		parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
-	}
-	return strings.Join(parts, "@"), nil
-}
-
-// Truncate a string to the closest length without breaking words.
-func Truncate(str string, length int, ending string) string {
-	var aftstr, befstr string
-	if len(str) > length {
-		words := strings.Fields(str)
-		before, present := 0, 0
-		for i := range words {
-			befstr = aftstr
-			before = present
-			aftstr = aftstr + words[i] + " "
-			present = len(aftstr)
-			if present > length && i != 0 {
-				if (length - before) < (present - length) {
-					return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
-				}
-				return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
-			}
-		}
-	}
-
-	return str
-}
-
-// PadLeft pads left side of a string if size of string is less then indicated pad length
-func PadLeft(str string, padStr string, padLen int) string {
-	return buildPadStr(str, padStr, padLen, true, false)
-}
-
-// PadRight pads right side of a string if size of string is less then indicated pad length
-func PadRight(str string, padStr string, padLen int) string {
-	return buildPadStr(str, padStr, padLen, false, true)
-}
-
-// PadBoth pads both sides of a string if size of string is less then indicated pad length
-func PadBoth(str string, padStr string, padLen int) string {
-	return buildPadStr(str, padStr, padLen, true, true)
-}
-
-// PadString either left, right or both sides.
-// Note that padding string can be unicode and more then one character
-func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
-
-	// When padded length is less then the current string size
-	if padLen < utf8.RuneCountInString(str) {
-		return str
-	}
-
-	padLen -= utf8.RuneCountInString(str)
-
-	targetLen := padLen
-
-	targetLenLeft := targetLen
-	targetLenRight := targetLen
-	if padLeft && padRight {
-		targetLenLeft = padLen / 2
-		targetLenRight = padLen - targetLenLeft
-	}
-
-	strToRepeatLen := utf8.RuneCountInString(padStr)
-
-	repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
-	repeatedString := strings.Repeat(padStr, repeatTimes)
-
-	leftSide := ""
-	if padLeft {
-		leftSide = repeatedString[0:targetLenLeft]
-	}
-
-	rightSide := ""
-	if padRight {
-		rightSide = repeatedString[0:targetLenRight]
-	}
-
-	return leftSide + str + rightSide
-}
-
-// TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object
-func TruncatingErrorf(str string, args ...interface{}) error {
-	n := strings.Count(str, "%s")
-	return fmt.Errorf(str, args[:n]...)
-}
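The provider-specific rules NormalizeEmail documents above are easy to lose in the string plumbing. Here is a compact restatement of just the GMail branch; `normalizeGmail` is a hypothetical stand-in, and it skips the IsEmail precheck the real function performs:

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeGmail applies the GMail rules described above: lowercase both
// parts, fold googlemail.com into gmail.com, then drop dots and any "+tag"
// suffix in the local part.
func normalizeGmail(addr string) string {
	at := strings.LastIndex(addr, "@")
	if at < 0 {
		return addr
	}
	local := strings.ToLower(addr[:at])
	domain := strings.ToLower(addr[at+1:])
	if domain == "gmail.com" || domain == "googlemail.com" {
		domain = "gmail.com"
		local = strings.ReplaceAll(local, ".", "")
		local = strings.SplitN(local, "+", 2)[0]
	}
	return local + "@" + domain
}

func main() {
	fmt.Println(normalizeGmail("Some.One+tag@GoogleMail.com")) // someone@gmail.com
}
```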
diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go
deleted file mode 100644
index c9c4fac065..0000000000
--- a/vendor/github.com/asaskevich/govalidator/validator.go
+++ /dev/null
@@ -1,1768 +0,0 @@
-// Package govalidator is package of validators and sanitizers for strings, structs and collections.
-package govalidator
-
-import (
-	"bytes"
-	"crypto/rsa"
-	"crypto/x509"
-	"encoding/base64"
-	"encoding/json"
-	"encoding/pem"
-	"fmt"
-	"io/ioutil"
-	"net"
-	"net/url"
-	"reflect"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-	"unicode"
-	"unicode/utf8"
-)
-
-var (
-	fieldsRequiredByDefault bool
-	nilPtrAllowedByRequired = false
-	notNumberRegexp         = regexp.MustCompile("[^0-9]+")
-	whiteSpacesAndMinus     = regexp.MustCompile(`[\s-]+`)
-	paramsRegexp            = regexp.MustCompile(`\(.*\)$`)
-)
-
-const maxURLRuneCount = 2083
-const minURLRuneCount = 3
-const rfc3339WithoutZone = "2006-01-02T15:04:05"
-
-// SetFieldsRequiredByDefault causes validation to fail when struct fields
-// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
-// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
-//	type exampleStruct struct {
-//		Name  string ``
-//		Email string `valid:"email"`
-// This, however, will only fail when Email is empty or an invalid email address:
-//	type exampleStruct2 struct {
-//		Name  string `valid:"-"`
-//		Email string `valid:"email"`
-// Lastly, this will only fail when Email is an invalid email address but not when it's empty:
-//	type exampleStruct2 struct {
-//		Name  string `valid:"-"`
-//		Email string `valid:"email,optional"`
-func SetFieldsRequiredByDefault(value bool) {
-	fieldsRequiredByDefault = value
-}
-
-// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required.
-// The validation will still reject ptr fields in their zero value state. Example with this enabled:
-//	type exampleStruct struct {
-//		Name *string `valid:"required"`
-// With `Name` set to "", this will be considered invalid input and will cause a validation error.
-// With `Name` set to nil, this will be considered valid by validation.
-// By default this is disabled.
-func SetNilPtrAllowedByRequired(value bool) {
-	nilPtrAllowedByRequired = value
-}
-
-// IsEmail checks if the string is an email.
-func IsEmail(str string) bool {
-	// TODO uppercase letters are not supported
-	return rxEmail.MatchString(str)
-}
-
-// IsExistingEmail checks if the string is an email of existing domain
-func IsExistingEmail(email string) bool {
-
-	if len(email) < 6 || len(email) > 254 {
-		return false
-	}
-	at := strings.LastIndex(email, "@")
-	if at <= 0 || at > len(email)-3 {
-		return false
-	}
-	user := email[:at]
-	host := email[at+1:]
-	if len(user) > 64 {
-		return false
-	}
-	switch host {
-	case "localhost", "example.com":
-		return true
-	}
-	if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) {
-		return false
-	}
-	if _, err := net.LookupMX(host); err != nil {
-		if _, err := net.LookupIP(host); err != nil {
-			return false
-		}
-	}
-
-	return true
-}
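IsExistingEmail's deliverability test is the only network-touching check in the file: it tries MX records first and falls back to a plain address lookup. A sketch of that fallback, assuming working DNS; `hostAcceptsMail` is my name for it:

```go
package main

import (
	"fmt"
	"net"
)

// hostAcceptsMail mirrors the lookup fallback in IsExistingEmail: a domain
// is considered deliverable if it has MX records, or failing that, at least
// one A/AAAA record. Network access is required, so this is illustrative.
func hostAcceptsMail(host string) bool {
	if _, err := net.LookupMX(host); err == nil {
		return true
	}
	_, err := net.LookupIP(host)
	return err == nil
}

func main() {
	fmt.Println(hostAcceptsMail("gmail.com")) // true on a machine with DNS
}
```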
-
-// IsURL checks if the string is an URL.
-func IsURL(str string) bool {
-	if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
-		return false
-	}
-	strTemp := str
-	if strings.Contains(str, ":") && !strings.Contains(str, "://") {
-		// support no indicated urlscheme but with colon for port number
-		// http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString
-		strTemp = "http://" + str
-	}
-	u, err := url.Parse(strTemp)
-	if err != nil {
-		return false
-	}
-	if strings.HasPrefix(u.Host, ".") {
-		return false
-	}
-	if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
-		return false
-	}
-	return rxURL.MatchString(str)
-}
-
-// IsRequestURL checks if the string rawurl, assuming
-// it was received in an HTTP request, is a valid
-// URL confirm to RFC 3986
-func IsRequestURL(rawurl string) bool {
-	url, err := url.ParseRequestURI(rawurl)
-	if err != nil {
-		return false //Couldn't even parse the rawurl
-	}
-	if len(url.Scheme) == 0 {
-		return false //No Scheme found
-	}
-	return true
-}
-
-// IsRequestURI checks if the string rawurl, assuming
-// it was received in an HTTP request, is an
-// absolute URI or an absolute path.
-func IsRequestURI(rawurl string) bool {
-	_, err := url.ParseRequestURI(rawurl)
-	return err == nil
-}
-
-// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
-func IsAlpha(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxAlpha.MatchString(str)
-}
-
-//IsUTFLetter checks if the string contains only unicode letter characters.
-//Similar to IsAlpha but for all languages. Empty string is valid.
-func IsUTFLetter(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-
-	for _, c := range str {
-		if !unicode.IsLetter(c) {
-			return false
-		}
-	}
-	return true
-
-}
-
-// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid.
-func IsAlphanumeric(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxAlphanumeric.MatchString(str)
-}
-
-// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid.
-func IsUTFLetterNumeric(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	for _, c := range str {
-		if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok
-			return false
-		}
-	}
-	return true
-
-}
-
-// IsNumeric checks if the string contains only numbers. Empty string is valid.
-func IsNumeric(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxNumeric.MatchString(str)
-}
-
-// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
-// Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid.
-func IsUTFNumeric(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	if strings.IndexAny(str, "+-") > 0 {
-		return false
-	}
-	if len(str) > 1 {
-		str = strings.TrimPrefix(str, "-")
-		str = strings.TrimPrefix(str, "+")
-	}
-	for _, c := range str {
-		if !unicode.IsNumber(c) { //numbers && minus sign are ok
-			return false
-		}
-	}
-	return true
-
-}
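The strTemp dance in IsURL above is subtle: a scheme-less "host:port" would trip up url.Parse, so a throwaway "http://" prefix is parsed while the regexp still sees the original string. Isolated, the trick looks like this; `parseLoose` is a hypothetical helper:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseLoose prefixes "http://" onto a bare "host:port" input purely so
// url.Parse succeeds, the same workaround IsURL applies via strTemp.
func parseLoose(raw string) (*url.URL, error) {
	tmp := raw
	if strings.Contains(raw, ":") && !strings.Contains(raw, "://") {
		tmp = "http://" + raw
	}
	return url.Parse(tmp)
}

func main() {
	u, err := parseLoose("example.com:8080/path")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Host) // example.com:8080
}
```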
-
-// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.
-func IsUTFDigit(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	if strings.IndexAny(str, "+-") > 0 {
-		return false
-	}
-	if len(str) > 1 {
-		str = strings.TrimPrefix(str, "-")
-		str = strings.TrimPrefix(str, "+")
-	}
-	for _, c := range str {
-		if !unicode.IsDigit(c) { //digits && minus sign are ok
-			return false
-		}
-	}
-	return true
-
-}
-
-// IsHexadecimal checks if the string is a hexadecimal number.
-func IsHexadecimal(str string) bool {
-	return rxHexadecimal.MatchString(str)
-}
-
-// IsHexcolor checks if the string is a hexadecimal color.
-func IsHexcolor(str string) bool {
-	return rxHexcolor.MatchString(str)
-}
-
-// IsRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB).
-func IsRGBcolor(str string) bool {
-	return rxRGBcolor.MatchString(str)
-}
-
-// IsLowerCase checks if the string is lowercase. Empty string is valid.
-func IsLowerCase(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return str == strings.ToLower(str)
-}
-
-// IsUpperCase checks if the string is uppercase. Empty string is valid.
-func IsUpperCase(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return str == strings.ToUpper(str)
-}
-
-// HasLowerCase checks if the string contains at least 1 lowercase. Empty string is valid.
-func HasLowerCase(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxHasLowerCase.MatchString(str)
-}
-
-// HasUpperCase checks if the string contains as least 1 uppercase. Empty string is valid.
-func HasUpperCase(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxHasUpperCase.MatchString(str)
-}
-
-// IsInt checks if the string is an integer. Empty string is valid.
-func IsInt(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxInt.MatchString(str)
-}
-
-// IsFloat checks if the string is a float.
-func IsFloat(str string) bool {
-	return str != "" && rxFloat.MatchString(str)
-}
-
-// IsDivisibleBy checks if the string is a number that's divisible by another.
-// If second argument is not valid integer or zero, it's return false.
-// Otherwise, if first argument is not valid integer or zero, it's return true (Invalid string converts to zero).
-func IsDivisibleBy(str, num string) bool {
-	f, _ := ToFloat(str)
-	p := int64(f)
-	q, _ := ToInt(num)
-	if q == 0 {
-		return false
-	}
-	return (p == 0) || (p%q == 0)
-}
-
-// IsNull checks if the string is null.
-func IsNull(str string) bool {
-	return len(str) == 0
-}
-
-// IsNotNull checks if the string is not null.
-func IsNotNull(str string) bool {
-	return !IsNull(str)
-}
-
-// HasWhitespaceOnly checks the string only contains whitespace
-func HasWhitespaceOnly(str string) bool {
-	return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
-}
-
-// HasWhitespace checks if the string contains any whitespace
-func HasWhitespace(str string) bool {
-	return len(str) > 0 && rxHasWhitespace.MatchString(str)
-}
-
-// IsByteLength checks if the string's length (in bytes) falls in a range.
-func IsByteLength(str string, min, max int) bool {
-	return len(str) >= min && len(str) <= max
-}
-
-// IsUUIDv3 checks if the string is a UUID version 3.
-func IsUUIDv3(str string) bool {
-	return rxUUID3.MatchString(str)
-}
-
-// IsUUIDv4 checks if the string is a UUID version 4.
-func IsUUIDv4(str string) bool {
-	return rxUUID4.MatchString(str)
-}
-
-// IsUUIDv5 checks if the string is a UUID version 5.
-func IsUUIDv5(str string) bool {
-	return rxUUID5.MatchString(str)
-}
-
-// IsUUID checks if the string is a UUID (version 3, 4 or 5).
-func IsUUID(str string) bool {
-	return rxUUID.MatchString(str)
-}
-
-// Byte to index table for O(1) lookups when unmarshaling.
-// We use 0xFF as sentinel value for invalid indexes.
-var ulidDec = [...]byte{
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
-	0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
-	0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF,
-	0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
-	0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
-	0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14,
-	0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
-	0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-}
-
-// EncodedSize is the length of a text encoded ULID.
-const ulidEncodedSize = 26
-
-// IsULID checks if the string is a ULID.
-//
-// Implementation got from:
-// https://github.com/oklog/ulid (Apache-2.0 License)
-//
-func IsULID(str string) bool {
-	// Check if a base32 encoded ULID is the right length.
-	if len(str) != ulidEncodedSize {
-		return false
-	}
-
-	// Check if all the characters in a base32 encoded ULID are part of the
-	// expected base32 character set.
-	if ulidDec[str[0]] == 0xFF ||
-		ulidDec[str[1]] == 0xFF ||
-		ulidDec[str[2]] == 0xFF ||
-		ulidDec[str[3]] == 0xFF ||
-		ulidDec[str[4]] == 0xFF ||
-		ulidDec[str[5]] == 0xFF ||
-		ulidDec[str[6]] == 0xFF ||
-		ulidDec[str[7]] == 0xFF ||
-		ulidDec[str[8]] == 0xFF ||
-		ulidDec[str[9]] == 0xFF ||
-		ulidDec[str[10]] == 0xFF ||
-		ulidDec[str[11]] == 0xFF ||
-		ulidDec[str[12]] == 0xFF ||
-		ulidDec[str[13]] == 0xFF ||
-		ulidDec[str[14]] == 0xFF ||
-		ulidDec[str[15]] == 0xFF ||
-		ulidDec[str[16]] == 0xFF ||
-		ulidDec[str[17]] == 0xFF ||
-		ulidDec[str[18]] == 0xFF ||
-		ulidDec[str[19]] == 0xFF ||
-		ulidDec[str[20]] == 0xFF ||
-		ulidDec[str[21]] == 0xFF ||
-		ulidDec[str[22]] == 0xFF ||
-		ulidDec[str[23]] == 0xFF ||
-		ulidDec[str[24]] == 0xFF ||
-		ulidDec[str[25]] == 0xFF {
-		return false
-	}
-
-	// Check if the first character in a base32 encoded ULID will overflow. This
-	// happens because the base32 representation encodes 130 bits, while the
-	// ULID is only 128 bits.
-	//
-	// See https://github.com/oklog/ulid/issues/9 for details.
-	if str[0] > '7' {
-		return false
-	}
-	return true
-}
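The 256-entry ulidDec table above is just a constant-time membership test for Crockford's base32 alphabet (with lowercase aliases). A compact equivalent, plus the two other IsULID rules (length 26, first character no greater than '7'); `crockford` is my helper name, and the sample ULID is the canonical one from the ULID spec:

```go
package main

import (
	"fmt"
	"strings"
)

// crockford reports whether b is in Crockford's base32 alphabet, the same
// character set the ulidDec table encodes (case-insensitive; I, L, O and U
// are excluded from the alphabet).
func crockford(b byte) bool {
	const alphabet = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
	if b >= 'a' && b <= 'z' {
		b -= 32 // fold lowercase letters
	}
	return strings.IndexByte(alphabet, b) >= 0
}

func main() {
	id := "01ARZ3NDEKTSV4RRFFQ69G5FAV" // example ULID from the spec
	ok := len(id) == 26 && id[0] <= '7' // 26 chars; first char must not overflow 128 bits
	for i := 0; ok && i < len(id); i++ {
		ok = crockford(id[i])
	}
	fmt.Println(ok) // true
}
```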
-
-// IsCreditCard checks if the string is a credit card.
-func IsCreditCard(str string) bool {
-	sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
-	if !rxCreditCard.MatchString(sanitized) {
-		return false
-	}
-
-	number, _ := ToInt(sanitized)
-	number, lastDigit := number / 10, number % 10
-
-	var sum int64
-	for i := 0; number > 0; i++ {
-		digit := number % 10
-
-		if i % 2 == 0 {
-			digit *= 2
-			if digit > 9 {
-				digit -= 9
-			}
-		}
-
-		sum += digit
-		number = number / 10
-	}
-
-	return (sum + lastDigit) % 10 == 0
-}
-
-// IsISBN10 checks if the string is an ISBN version 10.
-func IsISBN10(str string) bool {
-	return IsISBN(str, 10)
-}
-
-// IsISBN13 checks if the string is an ISBN version 13.
-func IsISBN13(str string) bool {
-	return IsISBN(str, 13)
-}
-
-// IsISBN checks if the string is an ISBN (version 10 or 13).
-// If version value is not equal to 10 or 13, it will be checks both variants.
-func IsISBN(str string, version int) bool {
-	sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
-	var checksum int32
-	var i int32
-	if version == 10 {
-		if !rxISBN10.MatchString(sanitized) {
-			return false
-		}
-		for i = 0; i < 9; i++ {
-			checksum += (i + 1) * int32(sanitized[i]-'0')
-		}
-		if sanitized[9] == 'X' {
-			checksum += 10 * 10
-		} else {
-			checksum += 10 * int32(sanitized[9]-'0')
-		}
-		if checksum%11 == 0 {
-			return true
-		}
-		return false
-	} else if version == 13 {
-		if !rxISBN13.MatchString(sanitized) {
-			return false
-		}
-		factor := []int32{1, 3}
-		for i = 0; i < 12; i++ {
-			checksum += factor[i%2] * int32(sanitized[i]-'0')
-		}
-		return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
-	}
-	return IsISBN(str, 10) || IsISBN(str, 13)
-}
-
-// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal).
-func IsJSON(str string) bool {
-	var js json.RawMessage
-	return json.Unmarshal([]byte(str), &js) == nil
-}
-
-// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid.
-func IsMultibyte(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxMultibyte.MatchString(str)
-}
-
-// IsASCII checks if the string contains ASCII chars only. Empty string is valid.
-func IsASCII(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxASCII.MatchString(str)
-}
-
-// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid.
-func IsPrintableASCII(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxPrintableASCII.MatchString(str)
-}
-
-// IsFullWidth checks if the string contains any full-width chars. Empty string is valid.
-func IsFullWidth(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxFullWidth.MatchString(str)
-}
-
-// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid.
-func IsHalfWidth(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxHalfWidth.MatchString(str)
-}
-
-// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid.
-func IsVariableWidth(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
-}
-
-// IsBase64 checks if a string is base64 encoded.
-func IsBase64(str string) bool {
-	return rxBase64.MatchString(str)
-}
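IsCreditCard above runs the Luhn checksum over an int64 produced by ToInt, which cannot represent the longest 19-digit PANs. For comparison, here is the same checksum computed directly over the digit string; `luhn` is a hypothetical stand-in and the sample number is a well-known test PAN:

```go
package main

import "fmt"

// luhn implements the same check-digit scheme IsCreditCard applies: walking
// right to left, every second digit is doubled (subtracting 9 on overflow),
// and the total must be divisible by 10.
func luhn(digits string) bool {
	sum, double := 0, false
	for i := len(digits) - 1; i >= 0; i-- {
		d := int(digits[i] - '0')
		if d < 0 || d > 9 {
			return false // non-digit input
		}
		if double {
			d *= 2
			if d > 9 {
				d -= 9
			}
		}
		sum += d
		double = !double
	}
	return sum%10 == 0
}

func main() {
	fmt.Println(luhn("4111111111111111")) // true: classic Luhn-valid test number
	fmt.Println(luhn("4111111111111112")) // false
}
```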
-
-// IsFilePath checks is a string is Win or Unix file path and returns it's type.
-func IsFilePath(str string) (bool, int) {
-	if rxWinPath.MatchString(str) {
-		//check windows path limit see:
-		// http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
-		if len(str[3:]) > 32767 {
-			return false, Win
-		}
-		return true, Win
-	} else if rxUnixPath.MatchString(str) {
-		return true, Unix
-	}
-	return false, Unknown
-}
-
-//IsWinFilePath checks both relative & absolute paths in Windows
-func IsWinFilePath(str string) bool {
-	if rxARWinPath.MatchString(str) {
-		//check windows path limit see:
-		// http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
-		if len(str[3:]) > 32767 {
-			return false
-		}
-		return true
-	}
-	return false
-}
-
-//IsUnixFilePath checks both relative & absolute paths in Unix
-func IsUnixFilePath(str string) bool {
-	if rxARUnixPath.MatchString(str) {
-		return true
-	}
-	return false
-}
-
-// IsDataURI checks if a string is base64 encoded data URI such as an image
-func IsDataURI(str string) bool {
-	dataURI := strings.Split(str, ",")
-	if !rxDataURI.MatchString(dataURI[0]) {
-		return false
-	}
-	return IsBase64(dataURI[1])
-}
-
-// IsMagnetURI checks if a string is valid magnet URI
-func IsMagnetURI(str string) bool {
-	return rxMagnetURI.MatchString(str)
-}
-
-// IsISO3166Alpha2 checks if a string is valid two-letter country code
-func IsISO3166Alpha2(str string) bool {
-	for _, entry := range ISO3166List {
-		if str == entry.Alpha2Code {
-			return true
-		}
-	}
-	return false
-}
-
-// IsISO3166Alpha3 checks if a string is valid three-letter country code
-func IsISO3166Alpha3(str string) bool {
-	for _, entry := range ISO3166List {
-		if str == entry.Alpha3Code {
-			return true
-		}
-	}
-	return false
-}
-
-// IsISO693Alpha2 checks if a string is valid two-letter language code
-func IsISO693Alpha2(str string) bool {
-	for _, entry := range ISO693List {
-		if str == entry.Alpha2Code {
-			return true
-		}
-	}
-	return false
-}
-
-// IsISO693Alpha3b checks if a string is valid three-letter language code
-func IsISO693Alpha3b(str string) bool {
-	for _, entry := range ISO693List {
-		if str == entry.Alpha3bCode {
-			return true
-		}
-	}
-	return false
-}
-
-// IsDNSName will validate the given string as a DNS name
-func IsDNSName(str string) bool {
-	if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
-		// constraints already violated
-		return false
-	}
-	return !IsIP(str) && rxDNSName.MatchString(str)
-}
-
-// IsHash checks if a string is a hash of type algorithm.
-// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b']
-func IsHash(str string, algorithm string) bool {
-	var len string
-	algo := strings.ToLower(algorithm)
-
-	if algo == "crc32" || algo == "crc32b" {
-		len = "8"
-	} else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" {
-		len = "32"
-	} else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" {
-		len = "40"
-	} else if algo == "tiger192" {
-		len = "48"
-	} else if algo == "sha3-224" {
-		len = "56"
-	} else if algo == "sha256" || algo == "sha3-256" {
-		len = "64"
-	} else if algo == "sha384" || algo == "sha3-384" {
-		len = "96"
-	} else if algo == "sha512" || algo == "sha3-512" {
-		len = "128"
-	} else {
-		return false
-	}
-
-	return Matches(str, "^[a-f0-9]{"+len+"}$")
-}
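IsHash reduces every supported algorithm to a single question: is the value lowercase hex of the digest length that algorithm implies? The dispatch reads more clearly as a table; `isHash` here is a sketch covering only a few algorithms:

```go
package main

import (
	"fmt"
	"regexp"
)

// hexLen maps an algorithm to its hex-digest length, the same association
// the if/else chain in the deleted IsHash encodes.
var hexLen = map[string]int{
	"crc32": 8, "md5": 32, "sha1": 40, "sha256": 64, "sha512": 128,
}

// isHash validates s as a lowercase hex string of the expected length.
func isHash(s, algo string) bool {
	n, ok := hexLen[algo]
	if !ok {
		return false
	}
	re := regexp.MustCompile(fmt.Sprintf("^[a-f0-9]{%d}$", n))
	return re.MatchString(s)
}

func main() {
	fmt.Println(isHash("d41d8cd98f00b204e9800998ecf8427e", "md5")) // true (MD5 of "")
}
```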
-
-// IsSHA3224 checks is a string is a SHA3-224 hash. Alias for `IsHash(str, "sha3-224")`
-func IsSHA3224(str string) bool {
-	return IsHash(str, "sha3-224")
-}
-
-// IsSHA3256 checks is a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")`
-func IsSHA3256(str string) bool {
-	return IsHash(str, "sha3-256")
-}
-
-// IsSHA3384 checks is a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")`
-func IsSHA3384(str string) bool {
-	return IsHash(str, "sha3-384")
-}
-
-// IsSHA3512 checks is a string is a SHA3-512 hash. Alias for `IsHash(str, "sha3-512")`
-func IsSHA3512(str string) bool {
-	return IsHash(str, "sha3-512")
-}
-
-// IsSHA512 checks is a string is a SHA512 hash. Alias for `IsHash(str, "sha512")`
-func IsSHA512(str string) bool {
-	return IsHash(str, "sha512")
-}
-
-// IsSHA384 checks is a string is a SHA384 hash. Alias for `IsHash(str, "sha384")`
-func IsSHA384(str string) bool {
-	return IsHash(str, "sha384")
-}
-
-// IsSHA256 checks is a string is a SHA256 hash. Alias for `IsHash(str, "sha256")`
-func IsSHA256(str string) bool {
-	return IsHash(str, "sha256")
-}
-
-// IsTiger192 checks is a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")`
-func IsTiger192(str string) bool {
-	return IsHash(str, "tiger192")
-}
-
-// IsTiger160 checks is a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")`
-func IsTiger160(str string) bool {
-	return IsHash(str, "tiger160")
-}
-
-// IsRipeMD160 checks is a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")`
-func IsRipeMD160(str string) bool {
-	return IsHash(str, "ripemd160")
-}
-
-// IsSHA1 checks is a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")`
-func IsSHA1(str string) bool {
-	return IsHash(str, "sha1")
-}
-
-// IsTiger128 checks is a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")`
-func IsTiger128(str string) bool {
-	return IsHash(str, "tiger128")
-}
-
-// IsRipeMD128 checks is a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")`
-func IsRipeMD128(str string) bool {
-	return IsHash(str, "ripemd128")
-}
-
-// IsCRC32 checks is a string is a CRC32 hash. Alias for `IsHash(str, "crc32")`
-func IsCRC32(str string) bool {
-	return IsHash(str, "crc32")
-}
-
-// IsCRC32b checks is a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")`
-func IsCRC32b(str string) bool {
-	return IsHash(str, "crc32b")
-}
-
-// IsMD5 checks is a string is a MD5 hash. Alias for `IsHash(str, "md5")`
-func IsMD5(str string) bool {
-	return IsHash(str, "md5")
-}
-
-// IsMD4 checks is a string is a MD4 hash. Alias for `IsHash(str, "md4")`
-func IsMD4(str string) bool {
-	return IsHash(str, "md4")
-}
-
-// IsDialString validates the given string for usage with the various Dial() functions
-func IsDialString(str string) bool {
-	if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
-		return true
-	}
-
-	return false
-}
-
-// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP`
-func IsIP(str string) bool {
-	return net.ParseIP(str) != nil
-}
-
-// IsPort checks if a string represents a valid port
-func IsPort(str string) bool {
-	if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
-		return true
-	}
-	return false
-}
-
-// IsIPv4 checks if the string is an IP version 4.
-func IsIPv4(str string) bool {
-	ip := net.ParseIP(str)
-	return ip != nil && strings.Contains(str, ".")
-}
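IsDialString combines three smaller checks: host/port splitting, host validity, and a 1-65535 port range. A condensed version that accepts only IP hosts, whereas the deleted code also accepts DNS names; `isDialString` is my name for it:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// isDialString reports whether s is usable with net.Dial: it must split into
// a non-empty host and port, the host must parse as an IP, and the port must
// fall in 1..65535.
func isDialString(s string) bool {
	h, p, err := net.SplitHostPort(s)
	if err != nil || h == "" || p == "" {
		return false
	}
	port, err := strconv.Atoi(p)
	return err == nil && port > 0 && port < 65536 && net.ParseIP(h) != nil
}

func main() {
	fmt.Println(isDialString("127.0.0.1:8080")) // true
	fmt.Println(isDialString("127.0.0.1"))      // false: no port
}
```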
-
-// IsIPv6 checks if the string is an IP version 6.
-func IsIPv6(str string) bool {
-	ip := net.ParseIP(str)
-	return ip != nil && strings.Contains(str, ":")
-}
-
-// IsCIDR checks if the string is an valid CIDR notiation (IPV4 & IPV6)
-func IsCIDR(str string) bool {
-	_, _, err := net.ParseCIDR(str)
-	return err == nil
-}
-
-// IsMAC checks if a string is valid MAC address.
-// Possible MAC formats:
-// 01:23:45:67:89:ab
-// 01:23:45:67:89:ab:cd:ef
-// 01-23-45-67-89-ab
-// 01-23-45-67-89-ab-cd-ef
-// 0123.4567.89ab
-// 0123.4567.89ab.cdef
-func IsMAC(str string) bool {
-	_, err := net.ParseMAC(str)
-	return err == nil
-}
-
-// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name
-func IsHost(str string) bool {
-	return IsIP(str) || IsDNSName(str)
-}
-
-// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
-func IsMongoID(str string) bool {
-	return rxHexadecimal.MatchString(str) && (len(str) == 24)
-}
-
-// IsLatitude checks if a string is valid latitude.
-func IsLatitude(str string) bool {
-	return rxLatitude.MatchString(str)
-}
-
-// IsLongitude checks if a string is valid longitude.
-func IsLongitude(str string) bool {
-	return rxLongitude.MatchString(str)
-}
-
-// IsIMEI checks if a string is valid IMEI
-func IsIMEI(str string) bool {
-	return rxIMEI.MatchString(str)
-}
-
-// IsIMSI checks if a string is valid IMSI
-func IsIMSI(str string) bool {
-	if !rxIMSI.MatchString(str) {
-		return false
-	}
-
-	mcc, err := strconv.ParseInt(str[0:3], 10, 32)
-	if err != nil {
-		return false
-	}
-
-	switch mcc {
-	case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219:
-	case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235:
-	case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257:
-	case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278:
-	case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293:
-	case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314:
-	case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346:
-	case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364:
-	case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402:
-	case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417:
-	case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428:
-	case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441:
-	case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467:
-	case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528:
-	case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545:
-	case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555:
-	case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611:
-	case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621:
-	case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631:
-	case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641:
-	case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652:
-	case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708:
-	case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736:
-	case 738, 740, 742, 744, 746, 748, 750, 995:
-		return true
-	default:
-		return false
-	}
-	return true
-}
-
-// IsRsaPublicKey checks if a string is valid public key with provided length
-func IsRsaPublicKey(str string, keylen int) bool {
-	bb := bytes.NewBufferString(str)
-	pemBytes, err := ioutil.ReadAll(bb)
-	if err != nil {
-		return false
-	}
-	block, _ := pem.Decode(pemBytes)
-	if block != nil && block.Type != "PUBLIC KEY" {
-		return false
-	}
-	var der []byte
-
-	if block != nil {
-		der = block.Bytes
-	} else {
-		der, err = base64.StdEncoding.DecodeString(str)
-		if err != nil {
-			return false
-		}
-	}
-
-	key, err := x509.ParsePKIXPublicKey(der)
-	if err != nil {
-		return false
-	}
-	pubkey, ok := key.(*rsa.PublicKey)
-	if !ok {
-		return false
-	}
-	bitlen := len(pubkey.N.Bytes()) * 8
-	return bitlen == int(keylen)
-}
-
-// IsRegex checks if a give string is a valid regex with RE2 syntax or not
-func IsRegex(str string) bool {
-	if _, err := regexp.Compile(str); err == nil {
-		return true
-	}
-	return false
-}
-
-func toJSONName(tag string) string {
-	if tag == "" {
-		return ""
-	}
-
-	// JSON name always comes first. If there's no options then split[0] is
-	// JSON name, if JSON name is not set, then split[0] is an empty string.
-	split := strings.SplitN(tag, ",", 2)
-
-	name := split[0]
-
-	// However it is possible that the field is skipped when
-	// (de-)serializing from/to JSON, in which case assume that there is no
-	// tag name to use
-	if name == "-" {
-		return ""
-	}
-	return name
-}
-
-func prependPathToErrors(err error, path string) error {
-	switch err2 := err.(type) {
-	case Error:
-		err2.Path = append([]string{path}, err2.Path...)
-		return err2
-	case Errors:
-		errors := err2.Errors()
-		for i, err3 := range errors {
-			errors[i] = prependPathToErrors(err3, path)
-		}
-		return err2
-	}
-	return err
-}
-
-// ValidateArray performs validation according to condition iterator that validates every element of the array
-func ValidateArray(array []interface{}, iterator ConditionIterator) bool {
-	return Every(array, iterator)
-}
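The functions that follow (ValidateMap, ValidateStruct, typeCheck) are the reflection machinery behind the package's tag-driven validation. Its core loop, stripped of dispatch and error handling, is short: walk exported fields, read the `valid` tag, and hand each value to a checker. A sketch of just that skeleton; `walkValidTags` and the `user` type are mine, and the sketch only prints what would be dispatched:

```go
package main

import (
	"fmt"
	"reflect"
)

// user is a hypothetical struct using the same `valid` tag convention as the
// deleted package.
type user struct {
	Name  string `valid:"required"`
	Email string `valid:"email,optional"`
}

// walkValidTags iterates exported fields, reads the `valid` tag, and would
// dispatch each (value, tag) pair to a checker; the real typeCheck also
// handles negation, parameters, and nested structs.
func walkValidTags(s interface{}) {
	v := reflect.ValueOf(s)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.PkgPath != "" { // unexported field, skipped like the original
			continue
		}
		fmt.Printf("field %s: value=%q tag=%q\n", f.Name, fmt.Sprint(v.Field(i).Interface()), f.Tag.Get("valid"))
	}
}

func main() {
	walkValidTags(user{Name: "gopher"})
	// field Name: value="gopher" tag="required"
	// field Email: value="" tag="email,optional"
}
```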
-
-// ValidateMap use validation map for fields.
-// result will be equal to `false` if there are any errors.
-// s is the map containing the data to be validated.
-// m is the validation map in the form:
-// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}}
-func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) {
-	if s == nil {
-		return true, nil
-	}
-	result := true
-	var err error
-	var errs Errors
-	var index int
-	val := reflect.ValueOf(s)
-	for key, value := range s {
-		presentResult := true
-		validator, ok := m[key]
-		if !ok {
-			presentResult = false
-			var err error
-			err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key)
-			err = prependPathToErrors(err, key)
-			errs = append(errs, err)
-		}
-		valueField := reflect.ValueOf(value)
-		mapResult := true
-		typeResult := true
-		structResult := true
-		resultField := true
-		switch subValidator := validator.(type) {
-		case map[string]interface{}:
-			var err error
-			if v, ok := value.(map[string]interface{}); !ok {
-				mapResult = false
-				err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String())
-				err = prependPathToErrors(err, key)
-				errs = append(errs, err)
-			} else {
-				mapResult, err = ValidateMap(v, subValidator)
-				if err != nil {
-					mapResult = false
-					err = prependPathToErrors(err, key)
-					errs = append(errs, err)
-				}
-			}
-		case string:
-			if (valueField.Kind() == reflect.Struct ||
-				(valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
-				subValidator != "-" {
-				var err error
-				structResult, err = ValidateStruct(valueField.Interface())
-				if err != nil {
-					err = prependPathToErrors(err, key)
-					errs = append(errs, err)
-				}
-			}
-			resultField, err = typeCheck(valueField, reflect.StructField{
-				Name:      key,
-				PkgPath:   "",
-				Type:      val.Type(),
-				Tag:       reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)),
-				Offset:    0,
-				Index:     []int{index},
-				Anonymous: false,
-			}, val, nil)
-			if err != nil {
-				errs = append(errs, err)
-			}
-		case nil:
-			// already handlerd when checked before
-		default:
-			typeResult = false
-			err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String())
-			err = prependPathToErrors(err, key)
-			errs = append(errs, err)
-		}
-		result = result && presentResult && typeResult && resultField && structResult && mapResult
-		index++
-	}
-	// checks required keys
-	requiredResult := true
-	for key, value := range m {
-		if schema, ok := value.(string); ok {
-			tags := parseTagIntoMap(schema)
-			if required, ok := tags["required"]; ok {
-				if _, ok := s[key]; !ok {
-					requiredResult = false
-					if required.customErrorMessage != "" {
-						err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}}
-					} else {
-						err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}}
-					}
-					errs = append(errs, err)
-				}
-			}
-		}
-	}
-
-	if len(errs) > 0 {
-		err = errs
-	}
-	return result && requiredResult, err
-}
-
-// ValidateStruct use tags for fields.
-// result will be equal to `false` if there are any errors.
-// todo currently there is no guarantee that errors will be returned in predictable order (tests may to fail)
-func ValidateStruct(s interface{}) (bool, error) {
-	if s == nil {
-		return true, nil
-	}
-	result := true
-	var err error
-	val := reflect.ValueOf(s)
-	if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
-		val = val.Elem()
-	}
-	// we only accept structs
-	if val.Kind() != reflect.Struct {
-		return false, fmt.Errorf("function only accepts structs; got %s", val.Kind())
-	}
-	var errs Errors
-	for i := 0; i < val.NumField(); i++ {
-		valueField := val.Field(i)
-		typeField := val.Type().Field(i)
-		if typeField.PkgPath != "" {
-			continue // Private field
-		}
-		structResult := true
-		if valueField.Kind() == reflect.Interface {
-			valueField = valueField.Elem()
-		}
-		if (valueField.Kind() == reflect.Struct ||
-			(valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
-			typeField.Tag.Get(tagName) != "-" {
-			var err error
-			structResult, err = ValidateStruct(valueField.Interface())
-			if err != nil {
-				err = prependPathToErrors(err, typeField.Name)
-				errs = append(errs, err)
-			}
-		}
-		resultField, err2 := typeCheck(valueField, typeField, val, nil)
-		if err2 != nil {
-
-			// Replace structure name with JSON name if there is a tag on the variable
-			jsonTag := toJSONName(typeField.Tag.Get("json"))
-			if jsonTag != "" {
-				switch jsonError := err2.(type) {
-				case Error:
-					jsonError.Name = jsonTag
-					err2 = jsonError
-				case Errors:
-					for i2, err3 := range jsonError {
-						switch customErr := err3.(type) {
-						case Error:
-							customErr.Name = jsonTag
-							jsonError[i2] = customErr
-						}
-					}
-
-					err2 = jsonError
-				}
-			}
-
-			errs = append(errs, err2)
-		}
-		result = result && resultField && structResult
-	}
-	if len(errs) > 0 {
-		err = errs
-	}
-	return result, err
-}
-
-// ValidateStructAsync performs async validation of the struct and returns results through the channels
-func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) {
-	res := make(chan bool)
-	errors := make(chan error)
-
-	go func() {
-		defer close(res)
-		defer close(errors)
-
-		isValid, isFailed := ValidateStruct(s)
-
-		res <- isValid
-		errors <- isFailed
-	}()
-
-	return res, errors
-}
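ValidateStructAsync above and ValidateMapAsync just below share one pattern: run the synchronous validator in a goroutine and deliver its two results over unbuffered channels. The ordering matters, since the caller must receive the bool before the error or the goroutine blocks forever. A generic sketch; `validateAsync` is my wrapper name:

```go
package main

import "fmt"

// validateAsync runs the given synchronous check in a goroutine and returns
// its two results over dedicated unbuffered channels, the same shape as the
// deleted *Async functions. Read the bool first, then the error: both sends
// come from the same goroutine in that order.
func validateAsync(check func() (bool, error)) (<-chan bool, <-chan error) {
	res := make(chan bool)
	errs := make(chan error)
	go func() {
		defer close(res)
		defer close(errs)
		ok, err := check()
		res <- ok
		errs <- err
	}()
	return res, errs
}

func main() {
	res, errs := validateAsync(func() (bool, error) { return true, nil })
	fmt.Println(<-res, <-errs) // true <nil>
}
```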
-
-// ValidateMapAsync performs async validation of the map and returns results through the channels
-func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) {
-	res := make(chan bool)
-	errors := make(chan error)
-
-	go func() {
-		defer close(res)
-		defer close(errors)
-
-		isValid, isFailed := ValidateMap(s, m)
-
-		res <- isValid
-		errors <- isFailed
-	}()
-
-	return res, errors
-}
-
-// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""}
-func parseTagIntoMap(tag string) tagOptionsMap {
-	optionsMap := make(tagOptionsMap)
-	options := strings.Split(tag, ",")
-
-	for i, option := range options {
-		option = strings.TrimSpace(option)
-
-		validationOptions := strings.Split(option, "~")
-		if !isValidTag(validationOptions[0]) {
-			continue
-		}
-		if len(validationOptions) == 2 {
-			optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i}
-		} else {
-			optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i}
-		}
-	}
-	return optionsMap
-}
-
-func isValidTag(s string) bool {
-	if s == "" {
-		return false
-	}
-	for _, c := range s {
-		switch {
-		case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
-			// Backslash and quote chars are reserved, but
-			// otherwise any punctuation chars are allowed
-			// in a tag name.
-		default:
-			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// IsSSN will validate the given string as a U.S. Social Security Number
-func IsSSN(str string) bool {
-	if str == "" || len(str) != 11 {
-		return false
-	}
-	return rxSSN.MatchString(str)
-}
-
-// IsSemver checks if string is valid semantic version
-func IsSemver(str string) bool {
-	return rxSemver.MatchString(str)
-}
-
-// IsType checks if interface is of some type
-func IsType(v interface{}, params ...string) bool {
-	if len(params) == 1 {
-		typ := params[0]
-		return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1)
-	}
-	return false
-}
-
-// IsTime checks if string is valid according to given format
-func IsTime(str string, format string) bool {
-	_, err := time.Parse(format, str)
-	return err == nil
-}
-
-// IsUnixTime checks if string is valid unix timestamp value
-func IsUnixTime(str string) bool {
-	if _, err := strconv.Atoi(str); err == nil {
-		return true
-	}
-	return false
-}
-
-// IsRFC3339 checks if string is valid timestamp value according to RFC3339
-func IsRFC3339(str string) bool {
-	return IsTime(str, time.RFC3339)
-}
-
-// IsRFC3339WithoutZone checks if string is valid timestamp value according to RFC3339 which excludes the timezone.
-func IsRFC3339WithoutZone(str string) bool {
-	return IsTime(str, rfc3339WithoutZone)
-}
-
-// IsISO4217 checks if string is valid ISO currency code
-func IsISO4217(str string) bool {
-	for _, currency := range ISO4217List {
-		if str == currency {
-			return true
-		}
-	}
-
-	return false
-}
-
-// ByteLength checks string's length
-func ByteLength(str string, params ...string) bool {
-	if len(params) == 2 {
-		min, _ := ToInt(params[0])
-		max, _ := ToInt(params[1])
-		return len(str) >= int(min) && len(str) <= int(max)
-	}
-
-	return false
-}
-
-// RuneLength checks string's length
-// Alias for StringLength
-func RuneLength(str string, params ...string) bool {
-	return StringLength(str, params...)
-}
-
-// IsRsaPub checks whether string is valid RSA key
-// Alias for IsRsaPublicKey
-func IsRsaPub(str string, params ...string) bool {
-	if len(params) == 1 {
-		len, _ := ToInt(params[0])
-		return IsRsaPublicKey(str, int(len))
-	}
-
-	return false
-}
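parseTagIntoMap above is the tag grammar in miniature: options are comma-separated, and a '~' separates a validator name from its custom error message. A simplified version that drops the isValidTag filtering and the positional bookkeeping; `parseTag` is a hypothetical helper:

```go
package main

import (
	"fmt"
	"strings"
)

// parseTag splits a `valid` tag into validator -> custom-message pairs,
// mirroring the grammar parseTagIntoMap documents.
func parseTag(tag string) map[string]string {
	out := map[string]string{}
	for _, opt := range strings.Split(tag, ",") {
		opt = strings.TrimSpace(opt)
		parts := strings.SplitN(opt, "~", 2)
		msg := ""
		if len(parts) == 2 {
			msg = parts[1]
		}
		out[parts[0]] = msg
	}
	return out
}

func main() {
	fmt.Println(parseTag(`required~Some error message,length(2|3)`))
	// map[length(2|3): required:Some error message]
}
```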
-
-// StringMatches checks if a string matches a given pattern.
-func StringMatches(s string, params ...string) bool {
-	if len(params) == 1 {
-		pattern := params[0]
-		return Matches(s, pattern)
-	}
-	return false
-}
-
-// StringLength checks string's length (including multi byte strings)
-func StringLength(str string, params ...string) bool {
-
-	if len(params) == 2 {
-		strLength := utf8.RuneCountInString(str)
-		min, _ := ToInt(params[0])
-		max, _ := ToInt(params[1])
-		return strLength >= int(min) && strLength <= int(max)
-	}
-
-	return false
-}
-
-// MinStringLength checks string's minimum length (including multi byte strings)
-func MinStringLength(str string, params ...string) bool {
-
-	if len(params) == 1 {
-		strLength := utf8.RuneCountInString(str)
-		min, _ := ToInt(params[0])
-		return strLength >= int(min)
-	}
-
-	return false
-}
-
-// MaxStringLength checks string's maximum length (including multi byte strings)
-func MaxStringLength(str string, params ...string) bool {
-
-	if len(params) == 1 {
-		strLength := utf8.RuneCountInString(str)
-		max, _ := ToInt(params[0])
-		return strLength <= int(max)
-	}
-
-	return false
-}
-
-// Range checks string's length
-func Range(str string, params ...string) bool {
-	if len(params) == 2 {
-		value, _ := ToFloat(str)
-		min, _ := ToFloat(params[0])
-		max, _ := ToFloat(params[1])
-		return InRange(value, min, max)
-	}
-
-	return false
-}
-
-// IsInRaw checks if string is in list of allowed values
-func IsInRaw(str string, params ...string) bool {
-	if len(params) == 1 {
-		rawParams := params[0]
-
-		parsedParams := strings.Split(rawParams, "|")
-
-		return IsIn(str, parsedParams...)
-	}
-
-	return false
-}
-
-// IsIn checks if string str is a member of the set of strings params
-func IsIn(str string, params ...string) bool {
-	for _, param := range params {
-		if str == param {
-			return true
-		}
-	}
-
-	return false
-}
-
-func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) {
-	if nilPtrAllowedByRequired {
-		k := v.Kind()
-		if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() {
-			return true, nil
-		}
-	}
-
-	if requiredOption, isRequired := options["required"]; isRequired {
-		if len(requiredOption.customErrorMessage) > 0 {
-			return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}}
-		}
-		return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}}
-	} else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional {
-		return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}}
-	}
-	// not required and empty is valid
-	return true, nil
-}
-
-func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) {
-	if !v.IsValid() {
-		return false, nil
-	}
-
-	tag := t.Tag.Get(tagName)
-
-	// checks if the field should be ignored
-	switch tag {
-	case "":
-		if v.Kind() != reflect.Slice && v.Kind() != reflect.Map {
-			if !fieldsRequiredByDefault {
-				return true, nil
-			}
-			return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}}
-		}
-	case "-":
-		return true, nil
-	}
-
-	isRootType := false
-	if options == nil {
-		isRootType = true
-		options = parseTagIntoMap(tag)
-	}
-
-	if isEmptyValue(v) {
-		// an empty value is not validated, checks only required
-		isValid, resultErr = checkRequired(v, t, options)
-		for key := range options {
-			delete(options, key)
-		}
-		return isValid, resultErr
-	}
-
-	var customTypeErrors Errors
-	optionsOrder := options.orderedKeys()
-	for _, validatorName := range optionsOrder {
-		validatorStruct := options[validatorName]
-		if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok {
-			delete(options, validatorName)
-
-			if result := validatefunc(v.Interface(), o.Interface()); !result {
-				if len(validatorStruct.customErrorMessage) > 0 {
-					customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)})
-					continue
-				}
-				customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)})
-			}
-		}
-	}
-
-	if len(customTypeErrors.Errors()) > 0 {
-		return false, customTypeErrors
-	}
-
-	if isRootType {
-		// Ensure that we've checked the value by all specified validators before report that the value is valid
-		defer func() {
-			delete(options, "optional")
-			delete(options, "required")
-
-			if isValid && resultErr == nil && len(options) != 0 {
-				optionsOrder := options.orderedKeys()
-				for _, validator := range optionsOrder {
-					isValid = false
-					resultErr = Error{t.Name, fmt.Errorf(
-						"The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}}
-					return
-				}
-			}
-		}()
-	}
-
-	for _, validatorSpec := range optionsOrder {
-		validatorStruct := options[validatorSpec]
-		var negate bool
-		validator := validatorSpec
-		customMsgExists := len(validatorStruct.customErrorMessage) > 0
-
-		// checks whether the tag looks like '!something' or 'something'
-		if validator[0] == '!' {
-			validator = validator[1:]
-			negate = true
-		}
-
-		// checks for interface param validators
-		for key, value := range InterfaceParamTagRegexMap {
-			ps := value.FindStringSubmatch(validator)
-			if len(ps) == 0 {
-				continue
-			}
-
-			validatefunc, ok := InterfaceParamTagMap[key]
-			if !ok {
-				continue
-			}
-
-			delete(options, validatorSpec)
-
-			field := fmt.Sprint(v)
-			if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) {
-				if customMsgExists {
-					return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
-				}
-				if negate {
-					return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
-				}
-				return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
-			}
-		}
-	}
-
-	switch v.Kind() {
-	case reflect.Bool,
-		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
-		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
-		reflect.Float32, reflect.Float64,
-		reflect.String:
-		// for each tag option checks the map of validator functions
-		for _, validatorSpec := range optionsOrder {
-			validatorStruct := options[validatorSpec]
-			var negate bool
-			validator := validatorSpec
-			customMsgExists := len(validatorStruct.customErrorMessage) > 0
-
-			// checks whether the tag looks like '!something' or 'something'
-			if validator[0] == '!' {
{ - validator = validator[1:] - negate = true - } - - // checks for param validators - for key, value := range ParamTagRegexMap { - ps := value.FindStringSubmatch(validator) - if len(ps) == 0 { - continue - } - - validatefunc, ok := ParamTagMap[key] - if !ok { - continue - } - - delete(options, validatorSpec) - - switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - - field := fmt.Sprint(v) // make value into string, then validate with regex - if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - default: - // type not yet supported, fail - return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} - } - } - - if validatefunc, ok := TagMap[validator]; ok { - delete(options, validatorSpec) - - switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - field := fmt.Sprint(v) // make value into string, then validate with regex - if result := validatefunc(field); !result && !negate || result && negate { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - default: - //Not Yet Supported Types (Fail here!) 
- err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) - return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}} - } - } - } - return true, nil - case reflect.Map: - if v.Type().Key().Kind() != reflect.String { - return false, &UnsupportedTypeError{v.Type()} - } - var sv stringValues - sv = v.MapKeys() - sort.Sort(sv) - result := true - for i, k := range sv { - var resultItem bool - var err error - if v.MapIndex(k).Kind() != reflect.Struct { - resultItem, err = typeCheck(v.MapIndex(k), t, o, options) - if err != nil { - return false, err - } - } else { - resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) - if err != nil { - err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) - return false, err - } - } - result = result && resultItem - } - return result, nil - case reflect.Slice, reflect.Array: - result := true - for i := 0; i < v.Len(); i++ { - var resultItem bool - var err error - if v.Index(i).Kind() != reflect.Struct { - resultItem, err = typeCheck(v.Index(i), t, o, options) - if err != nil { - return false, err - } - } else { - resultItem, err = ValidateStruct(v.Index(i).Interface()) - if err != nil { - err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) - return false, err - } - } - result = result && resultItem - } - return result, nil - case reflect.Interface: - // If the value is an interface then encode its element - if v.IsNil() { - return true, nil - } - return ValidateStruct(v.Interface()) - case reflect.Ptr: - // If the value is a pointer then checks its element - if v.IsNil() { - return true, nil - } - return typeCheck(v.Elem(), t, o, options) - case reflect.Struct: - return true, nil - default: - return false, &UnsupportedTypeError{v.Type()} - } -} - -func stripParams(validatorString string) string { - return paramsRegexp.ReplaceAllString(validatorString, "") -} - -// isEmptyValue checks whether value empty or not -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.String, reflect.Array: - return v.Len() == 0 - case reflect.Map, reflect.Slice: - return v.Len() == 0 || v.IsNil() - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - - return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) -} - -// ErrorByField returns error for specified field of the struct -// validated by ValidateStruct or empty string if there are no errors -// or this field doesn't exists or doesn't have any errors. -func ErrorByField(e error, field string) string { - if e == nil { - return "" - } - return ErrorsByField(e)[field] -} - -// ErrorsByField returns map of errors of the struct validated -// by ValidateStruct or empty map if there are no errors. 
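The rune-based counting used by the StringLength, MinStringLength, and MaxStringLength validators deleted above can be illustrated with a small self-contained sketch (runeLengthInRange is an invented helper name, not part of govalidator):

package main

import (
	"fmt"
	"unicode/utf8"
)

// runeLengthInRange mirrors the check in StringLength above: length is
// measured in runes rather than bytes, so multi-byte UTF-8 input is
// counted the way a human would count characters.
func runeLengthInRange(s string, min, max int) bool {
	n := utf8.RuneCountInString(s)
	return n >= min && n <= max
}

func main() {
	s := "héllo" // 5 runes, 6 bytes
	fmt.Println(len(s))                     // 6: byte length
	fmt.Println(utf8.RuneCountInString(s))  // 5: rune length
	fmt.Println(runeLengthInRange(s, 1, 5)) // true
}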
-func ErrorsByField(e error) map[string]string { - m := make(map[string]string) - if e == nil { - return m - } - // prototype for ValidateStruct - - switch e := e.(type) { - case Error: - m[e.Name] = e.Err.Error() - case Errors: - for _, item := range e.Errors() { - n := ErrorsByField(item) - for k, v := range n { - m[k] = v - } - } - } - - return m -} - -// Error returns string equivalent for reflect.Type -func (e *UnsupportedTypeError) Error() string { - return "validator: unsupported type: " + e.Type.String() -} - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } -func (sv stringValues) get(i int) string { return sv[i].String() } - -func IsE164(str string) bool { - return rxE164.MatchString(str) -} diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml deleted file mode 100644 index bc5f7b0864..0000000000 --- a/vendor/github.com/asaskevich/govalidator/wercker.yml +++ /dev/null @@ -1,15 +0,0 @@ -box: golang -build: - steps: - - setup-go-workspace - - - script: - name: go get - code: | - go version - go get -t ./... - - - script: - name: go test - code: | - go test -race -v ./... diff --git a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt deleted file mode 100644 index 899129ecc4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go deleted file mode 100644 index 2264200c16..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ /dev/null @@ -1,208 +0,0 @@ -package aws - -import ( - "net/http" - - smithybearer "github.com/aws/smithy-go/auth/bearer" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// HTTPClient provides the interface to provide custom HTTPClients. Generally -// *http.Client is sufficient for most use cases. 
The HTTPClient should not -// follow 301 or 302 redirects. -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// A Config provides service configuration for service clients. -type Config struct { - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // See http://docs.aws.amazon.com/general/latest/gr/rande.html for - // information on AWS regions. - Region string - - // The credentials object to use when signing requests. - // Use the LoadDefaultConfig to load configuration from all the SDK's supported - // sources, and resolve credentials using the SDK's default credential chain. - Credentials CredentialsProvider - - // The Bearer Authentication token provider to use for authenticating API - // operation calls with a Bearer Authentication token. The API clients and - // operation must support Bearer Authentication scheme in order for the - // token provider to be used. API clients created with NewFromConfig will - // automatically be configured with this option, if the API client support - // Bearer Authentication. - // - // The SDK's config.LoadDefaultConfig can automatically populate this - // option for external configuration options such as SSO session. - // https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html - BearerAuthTokenProvider smithybearer.TokenProvider - - // The HTTP Client the SDK's API clients will use to invoke HTTP requests. - // The SDK defaults to a BuildableClient allowing API clients to create - // copies of the HTTP Client for service specific customizations. - // - // Use a (*http.Client) for custom behavior. Using a custom http.Client - // will prevent the SDK from modifying the HTTP client. - HTTPClient HTTPClient - - // An endpoint resolver that can be used to provide or override an endpoint - // for the given service and region. - // - // See the `aws.EndpointResolver` documentation for additional usage - // information. - // - // Deprecated: See Config.EndpointResolverWithOptions - EndpointResolver EndpointResolver - - // An endpoint resolver that can be used to provide or override an endpoint - // for the given service and region. - // - // When EndpointResolverWithOptions is specified, it will be used by a - // service client rather than using EndpointResolver if also specified. - // - // See the `aws.EndpointResolverWithOptions` documentation for additional - // usage information. - // - // Deprecated: with the release of endpoint resolution v2 in API clients, - // EndpointResolver and EndpointResolverWithOptions are deprecated. - // Providing a value for this field will likely prevent you from using - // newer endpoint-related service features. See API client options - // EndpointResolverV2 and BaseEndpoint. - EndpointResolverWithOptions EndpointResolverWithOptions - - // RetryMaxAttempts specifies the maximum number attempts an API client - // will call an operation that fails with a retryable error. - // - // API Clients will only use this value to construct a retryer if the - // Config.Retryer member is not nil. This value will be ignored if - // Retryer is not nil. - RetryMaxAttempts int - - // RetryMode specifies the retry model the API client will be created with. - // - // API Clients will only use this value to construct a retryer if the - // Config.Retryer member is not nil. 
This value will be ignored if - // Retryer is not nil. - RetryMode RetryMode - - // Retryer is a function that provides a Retryer implementation. A Retryer - // guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. - // - // In general, the provider function should return a new instance of a - // Retryer if you are attempting to provide a consistent Retryer - // configuration across all clients. This will ensure that each client will - // be provided a new instance of the Retryer implementation, and will avoid - // issues such as sharing the same retry token bucket across services. - // - // If not nil, RetryMaxAttempts, and RetryMode will be ignored by API - // clients. - Retryer func() Retryer - - // ConfigSources are the sources that were used to construct the Config. - // Allows for additional configuration to be loaded by clients. - ConfigSources []interface{} - - // APIOptions provides the set of middleware mutations modify how the API - // client requests will be handled. This is useful for adding additional - // tracing data to a request, or changing behavior of the SDK's client. - APIOptions []func(*middleware.Stack) error - - // The logger writer interface to write logging messages to. Defaults to - // standard error. - Logger logging.Logger - - // Configures the events that will be sent to the configured logger. This - // can be used to configure the logging of signing, retries, request, and - // responses of the SDK clients. - // - // See the ClientLogMode type documentation for the complete set of logging - // modes and available configuration. - ClientLogMode ClientLogMode - - // The configured DefaultsMode. If not specified, service clients will - // default to legacy. - // - // Supported modes are: auto, cross-region, in-region, legacy, mobile, - // standard - DefaultsMode DefaultsMode - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode - // is set to DefaultsModeAuto and is initialized by - // `config.LoadDefaultConfig`. You should not populate this structure - // programmatically, or rely on the values here within your applications. - RuntimeEnvironment RuntimeEnvironment - - // AppId is an optional application specific identifier that can be set. - // When set it will be appended to the User-Agent header of every request - // in the form of App/{AppId}. This variable is sourced from environment - // variable AWS_SDK_UA_APP_ID or the shared config profile attribute sdk_ua_app_id. - // See https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html for - // more information on environment variables and shared config settings. - AppID string - - // BaseEndpoint is an intermediary transfer location to a service specific - // BaseEndpoint on a service's Options. - BaseEndpoint *string - - // DisableRequestCompression toggles if an operation request could be - // compressed or not. Will be set to false by default. This variable is sourced from - // environment variable AWS_DISABLE_REQUEST_COMPRESSION or the shared config profile attribute - // disable_request_compression - DisableRequestCompression bool - - // RequestMinCompressSizeBytes sets the inclusive min bytes of a request body that could be - // compressed. Will be set to 10240 by default and must be within 0 and 10485760 bytes inclusively. 
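A minimal sketch of how the Config fields in this hunk fit together, using the vendored import path from this diff; the region, timeout, and retry values are arbitrary, and aws.RetryModeStandard is assumed from the SDK's retry-mode constants (not part of this hunk):

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	cfg := aws.Config{
		Region: "us-west-2",
		// Supplying a concrete *http.Client keeps the SDK from modifying it.
		HTTPClient:       &http.Client{Timeout: 10 * time.Second},
		RetryMaxAttempts: 5,
		RetryMode:        aws.RetryModeStandard,
		// Retryer is left nil here; if it were set, RetryMaxAttempts and
		// RetryMode would be ignored, per the field docs above.
	}

	cp := cfg.Copy() // shallow copy (see Copy further down in this hunk)
	cp.Region = "eu-west-1"
	fmt.Println(cfg.Region, cp.Region) // us-west-2 eu-west-1
}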
- // This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or - // the shared config profile attribute request_min_compression_size_bytes - RequestMinCompressSizeBytes int64 -} - -// NewConfig returns a new Config pointer that can be chained with builder -// methods to set multiple configuration values inline without using pointers. -func NewConfig() *Config { - return &Config{} -} - -// Copy will return a shallow copy of the Config object. -func (c Config) Copy() Config { - cp := c - return cp -} - -// EndpointDiscoveryEnableState indicates if endpoint discovery is -// enabled, disabled, auto or unset state. -// -// Default behavior (Auto or Unset) indicates operations that require endpoint -// discovery will use Endpoint Discovery by default. Operations that -// optionally use Endpoint Discovery will not use Endpoint Discovery -// unless EndpointDiscovery is explicitly enabled. -type EndpointDiscoveryEnableState uint - -// Enumeration values for EndpointDiscoveryEnableState -const ( - // EndpointDiscoveryUnset represents EndpointDiscoveryEnableState is unset. - // Users do not need to use this value explicitly. The behavior for unset - // is the same as for EndpointDiscoveryAuto. - EndpointDiscoveryUnset EndpointDiscoveryEnableState = iota - - // EndpointDiscoveryAuto represents an AUTO state that allows endpoint - // discovery only when required by the api. This is the default - // configuration resolved by the client if endpoint discovery is neither - // enabled or disabled. - EndpointDiscoveryAuto // default state - - // EndpointDiscoveryDisabled indicates client MUST not perform endpoint - // discovery even when required. - EndpointDiscoveryDisabled - - // EndpointDiscoveryEnabled indicates client MUST always perform endpoint - // discovery if supported for the operation. - EndpointDiscoveryEnabled -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go deleted file mode 100644 index 4d8e26ef32..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go +++ /dev/null @@ -1,22 +0,0 @@ -package aws - -import ( - "context" - "time" -) - -type suppressedContext struct { - context.Context -} - -func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { - return time.Time{}, false -} - -func (s *suppressedContext) Done() <-chan struct{} { - return nil -} - -func (s *suppressedContext) Err() error { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go deleted file mode 100644 index 781ac0ae2c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go +++ /dev/null @@ -1,224 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand" - "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight" -) - -// CredentialsCacheOptions are the options -type CredentialsCacheOptions struct { - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // An ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. 
This can cause an - // increased number of requests to refresh the credentials to occur. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // ExpiryWindowJitterFrac provides a mechanism for randomizing the - // expiration of credentials within the configured ExpiryWindow by a random - // percentage. Valid values are between 0.0 and 1.0. - // - // As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac - // is 0.5 then credentials will be set to expire between 30 to 60 seconds - // prior to their actual expiration time. - // - // If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored. - // If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window. - // If ExpiryWindowJitterFrac < 0 the value will be treated as 0. - // If ExpiryWindowJitterFrac > 1 the value will be treated as 1. - ExpiryWindowJitterFrac float64 -} - -// CredentialsCache provides caching and concurrency safe credentials retrieval -// via the provider's retrieve method. -// -// CredentialsCache will look for optional interfaces on the Provider to adjust -// how the credential cache handles credentials caching. -// -// - HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle -// credential refresh failures. This could return an updated Credentials -// value, or attempt another means of retrieving credentials. -// -// - AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how -// credentials Expires is modified. This could modify how the Credentials -// Expires is adjusted based on the CredentialsCache ExpiryWindow option. -// Such as providing a floor not to reduce the Expires below. -type CredentialsCache struct { - provider CredentialsProvider - - options CredentialsCacheOptions - creds atomic.Value - sf singleflight.Group -} - -// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider -// is expected to not be nil. A variadic list of one or more functions can be -// provided to modify the CredentialsCache configuration. This allows for -// configuration of credential expiry window and jitter. -func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache { - options := CredentialsCacheOptions{} - - for _, fn := range optFns { - fn(&options) - } - - if options.ExpiryWindow < 0 { - options.ExpiryWindow = 0 - } - - if options.ExpiryWindowJitterFrac < 0 { - options.ExpiryWindowJitterFrac = 0 - } else if options.ExpiryWindowJitterFrac > 1 { - options.ExpiryWindowJitterFrac = 1 - } - - return &CredentialsCache{ - provider: provider, - options: options, - } -} - -// Retrieve returns the credentials. If the credentials have already been -// retrieved, and not expired the cached credentials will be returned. If the -// credentials have not been retrieved yet, or expired the provider's Retrieve -// method will be called. -// -// Returns and error if the provider's retrieve method returns an error. 
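A sketch of wiring a provider into the cache deleted here, exercising the ExpiryWindow and jitter options defined above; the key material is a placeholder, and CredentialsProviderFunc comes from credentials.go later in this diff:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// A trivial static provider; the credentials are obviously fake.
	static := aws.CredentialsProviderFunc(func(ctx context.Context) (aws.Credentials, error) {
		return aws.Credentials{
			AccessKeyID:     "AKIDEXAMPLE",
			SecretAccessKey: "SECRETEXAMPLE",
			CanExpire:       true,
			Expires:         time.Now().Add(15 * time.Minute),
		}, nil
	})

	// Refresh up to 5 minutes early, randomized by up to half of that
	// window, so a fleet of clients does not refresh in lockstep.
	cache := aws.NewCredentialsCache(static, func(o *aws.CredentialsCacheOptions) {
		o.ExpiryWindow = 5 * time.Minute
		o.ExpiryWindowJitterFrac = 0.5
	})

	creds, err := cache.Retrieve(context.TODO())
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println(creds.AccessKeyID, creds.Expires)
}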
-func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) { - if creds, ok := p.getCreds(); ok && !creds.Expired() { - return creds, nil - } - - resCh := p.sf.DoChan("", func() (interface{}, error) { - return p.singleRetrieve(&suppressedContext{ctx}) - }) - select { - case res := <-resCh: - return res.Val.(Credentials), res.Err - case <-ctx.Done(): - return Credentials{}, &RequestCanceledError{Err: ctx.Err()} - } -} - -func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) { - currCreds, ok := p.getCreds() - if ok && !currCreds.Expired() { - return currCreds, nil - } - - newCreds, err := p.provider.Retrieve(ctx) - if err != nil { - handleFailToRefresh := defaultHandleFailToRefresh - if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok { - handleFailToRefresh = cs.HandleFailToRefresh - } - newCreds, err = handleFailToRefresh(ctx, currCreds, err) - if err != nil { - return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err) - } - } - - if newCreds.CanExpire && p.options.ExpiryWindow > 0 { - adjustExpiresBy := defaultAdjustExpiresBy - if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok { - adjustExpiresBy = cs.AdjustExpiresBy - } - - randFloat64, err := sdkrand.CryptoRandFloat64() - if err != nil { - return Credentials{}, fmt.Errorf("failed to get random provider, %w", err) - } - - var jitter time.Duration - if p.options.ExpiryWindowJitterFrac > 0 { - jitter = time.Duration(randFloat64 * - p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow)) - } - - newCreds, err = adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter)) - if err != nil { - return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err) - } - } - - p.creds.Store(&newCreds) - return newCreds, nil -} - -// getCreds returns the currently stored credentials and true. Returning false -// if no credentials were stored. -func (p *CredentialsCache) getCreds() (Credentials, bool) { - v := p.creds.Load() - if v == nil { - return Credentials{}, false - } - - c := v.(*Credentials) - if c == nil || !c.HasKeys() { - return Credentials{}, false - } - - return *c, true -} - -// Invalidate will invalidate the cached credentials. The next call to Retrieve -// will cause the provider's Retrieve method to be called. -func (p *CredentialsCache) Invalidate() { - p.creds.Store((*Credentials)(nil)) -} - -// IsCredentialsProvider returns whether credential provider wrapped by CredentialsCache -// matches the target provider type. -func (p *CredentialsCache) IsCredentialsProvider(target CredentialsProvider) bool { - return IsCredentialsProvider(p.provider, target) -} - -// HandleFailRefreshCredentialsCacheStrategy is an interface for -// CredentialsCache to allow CredentialsProvider how failed to refresh -// credentials is handled. -type HandleFailRefreshCredentialsCacheStrategy interface { - // Given the previously cached Credentials, if any, and refresh error, may - // returns new or modified set of Credentials, or error. - // - // Credential caches may use default implementation if nil. - HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error) -} - -// defaultHandleFailToRefresh returns the passed in error. 
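The fail-to-refresh hook above is discovered by a type assertion on the wrapped provider (see singleRetrieve earlier in this hunk), so a provider opts in simply by adding the method. A sketch, assuming a hypothetical staleTolerantProvider wrapper that serves previously cached credentials for a grace period:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// staleTolerantProvider wraps another provider and, when a refresh fails,
// keeps serving the previously cached credentials a little longer.
type staleTolerantProvider struct {
	aws.CredentialsProvider
	grace time.Duration
}

// HandleFailToRefresh satisfies HandleFailRefreshCredentialsCacheStrategy.
func (p staleTolerantProvider) HandleFailToRefresh(ctx context.Context, prev aws.Credentials, err error) (aws.Credentials, error) {
	if prev.HasKeys() {
		prev.Expires = time.Now().Add(p.grace) // extend the stale credentials
		return prev, nil
	}
	return aws.Credentials{}, err // nothing cached yet: surface the failure
}

func main() {
	failing := aws.CredentialsProviderFunc(func(context.Context) (aws.Credentials, error) {
		return aws.Credentials{}, errors.New("upstream unavailable")
	})

	cache := aws.NewCredentialsCache(staleTolerantProvider{
		CredentialsProvider: failing,
		grace:               time.Minute,
	})

	// With no previously cached credentials, the refresh error surfaces.
	_, err := cache.Retrieve(context.TODO())
	fmt.Println(err)
}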
-func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) { - return Credentials{}, err -} - -// AdjustExpiresByCredentialsCacheStrategy is an interface for CredentialCache -// to allow CredentialsProvider to intercept adjustments to Credentials expiry -// based on expectations and use cases of CredentialsProvider. -// -// Credential caches may use default implementation if nil. -type AdjustExpiresByCredentialsCacheStrategy interface { - // Given a Credentials as input, applying any mutations and - // returning the potentially updated Credentials, or error. - AdjustExpiresBy(Credentials, time.Duration) (Credentials, error) -} - -// defaultAdjustExpiresBy adds the duration to the passed in credentials Expires, -// and returns the updated credentials value. If Credentials value's CanExpire -// is false, the passed in credentials are returned unchanged. -func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) { - if !creds.CanExpire { - return creds, nil - } - - creds.Expires = creds.Expires.Add(dur) - return creds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go deleted file mode 100644 index 714d4ad85c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go +++ /dev/null @@ -1,170 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "reflect" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -// AnonymousCredentials provides a sentinel CredentialsProvider that should be -// used to instruct the SDK's signing middleware to not sign the request. -// -// Using `nil` credentials when configuring an API client will achieve the same -// result. The AnonymousCredentials type allows you to configure the SDK's -// external config loading to not attempt to source credentials from the shared -// config or environment. -// -// For example you can use this CredentialsProvider with an API client's -// Options to instruct the client not to sign a request for accessing public -// S3 bucket objects. -// -// The following example demonstrates using the AnonymousCredentials to prevent -// SDK's external config loading attempt to resolve credentials. -// -// cfg, err := config.LoadDefaultConfig(context.TODO(), -// config.WithCredentialsProvider(aws.AnonymousCredentials{}), -// ) -// if err != nil { -// log.Fatalf("failed to load config, %v", err) -// } -// -// client := s3.NewFromConfig(cfg) -// -// Alternatively you can leave the API client Option's `Credential` member to -// nil. If using the `NewFromConfig` constructor you'll need to explicitly set -// the `Credentials` member to nil, if the external config resolved a -// credential provider. -// -// client := s3.New(s3.Options{ -// // Credentials defaults to a nil value. -// }) -// -// This can also be configured for specific operations calls too. 
-// -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// log.Fatalf("failed to load config, %v", err) -// } -// -// client := s3.NewFromConfig(config) -// -// result, err := client.GetObject(context.TODO(), s3.GetObject{ -// Bucket: aws.String("example-bucket"), -// Key: aws.String("example-key"), -// }, func(o *s3.Options) { -// o.Credentials = nil -// // Or -// o.Credentials = aws.AnonymousCredentials{} -// }) -type AnonymousCredentials struct{} - -// Retrieve implements the CredentialsProvider interface, but will always -// return error, and cannot be used to sign a request. The AnonymousCredentials -// type is used as a sentinel type instructing the AWS request signing -// middleware to not sign a request. -func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) { - return Credentials{Source: "AnonymousCredentials"}, - fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with") -} - -// A Credentials is the AWS credentials value for individual credential fields. -type Credentials struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Source of the credentials - Source string - - // States if the credentials can expire or not. - CanExpire bool - - // The time the credentials will expire at. Should be ignored if CanExpire - // is false. - Expires time.Time -} - -// Expired returns if the credentials have expired. -func (v Credentials) Expired() bool { - if v.CanExpire { - // Calling Round(0) on the current time will truncate the monotonic - // reading only. Ensures credential expiry time is always based on - // reported wall-clock time. - return !v.Expires.After(sdk.NowTime().Round(0)) - } - - return false -} - -// HasKeys returns if the credentials keys are set. -func (v Credentials) HasKeys() bool { - return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0 -} - -// A CredentialsProvider is the interface for any component which will provide -// credentials Credentials. A CredentialsProvider is required to manage its own -// Expired state, and what to be expired means. -// -// A credentials provider implementation can be wrapped with a CredentialCache -// to cache the credential value retrieved. Without the cache the SDK will -// attempt to retrieve the credentials for every request. -type CredentialsProvider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve(ctx context.Context) (Credentials, error) -} - -// CredentialsProviderFunc provides a helper wrapping a function value to -// satisfy the CredentialsProvider interface. -type CredentialsProviderFunc func(context.Context) (Credentials, error) - -// Retrieve delegates to the function value the CredentialsProviderFunc wraps. -func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) { - return fn(ctx) -} - -type isCredentialsProvider interface { - IsCredentialsProvider(CredentialsProvider) bool -} - -// IsCredentialsProvider returns whether the target CredentialProvider is the same type as provider when comparing the -// implementation type. -// -// If provider has a method IsCredentialsProvider(CredentialsProvider) bool it will be responsible for validating -// whether target matches the credential provider type. 
-// -// When comparing the CredentialProvider implementations provider and target for equality, the following rules are used: -// -// If provider is of type T and target is of type V, true if type *T is the same as type *V, otherwise false -// If provider is of type *T and target is of type V, true if type *T is the same as type *V, otherwise false -// If provider is of type T and target is of type *V, true if type *T is the same as type *V, otherwise false -// If provider is of type *T and target is of type *V,true if type *T is the same as type *V, otherwise false -func IsCredentialsProvider(provider, target CredentialsProvider) bool { - if target == nil || provider == nil { - return provider == target - } - - if x, ok := provider.(isCredentialsProvider); ok { - return x.IsCredentialsProvider(target) - } - - targetType := reflect.TypeOf(target) - if targetType.Kind() != reflect.Ptr { - targetType = reflect.PtrTo(targetType) - } - - providerType := reflect.TypeOf(provider) - if providerType.Kind() != reflect.Ptr { - providerType = reflect.PtrTo(providerType) - } - - return targetType.AssignableTo(providerType) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go deleted file mode 100644 index fd408e5186..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go +++ /dev/null @@ -1,38 +0,0 @@ -package defaults - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - "runtime" - "strings" -) - -var getGOOS = func() string { - return runtime.GOOS -} - -// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode when the mode -// is set to aws.DefaultsModeAuto. -func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode { - goos := getGOOS() - if goos == "android" || goos == "ios" { - return aws.DefaultsModeMobile - } - - var currentRegion string - if len(environment.EnvironmentIdentifier) > 0 { - currentRegion = environment.Region - } - - if len(currentRegion) == 0 && len(environment.EC2InstanceMetadataRegion) > 0 { - currentRegion = environment.EC2InstanceMetadataRegion - } - - if len(region) > 0 && len(currentRegion) > 0 { - if strings.EqualFold(region, currentRegion) { - return aws.DefaultsModeInRegion - } - return aws.DefaultsModeCrossRegion - } - - return aws.DefaultsModeStandard -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go deleted file mode 100644 index 8b7e01fa29..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go +++ /dev/null @@ -1,43 +0,0 @@ -package defaults - -import ( - "time" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// Configuration is the set of SDK configuration options that are determined based -// on the configured DefaultsMode. -type Configuration struct { - // RetryMode is the configuration's default retry mode API clients should - // use for constructing a Retryer. - RetryMode aws.RetryMode - - // ConnectTimeout is the maximum amount of time a dial will wait for - // a connect to complete. - // - // See https://pkg.go.dev/net#Dialer.Timeout - ConnectTimeout *time.Duration - - // TLSNegotiationTimeout specifies the maximum amount of time waiting to - // wait for a TLS handshake. 
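The pointer-normalizing comparison rules listed above for IsCredentialsProvider mean a value and a pointer of the same provider type compare as equal. A small sketch with an invented myProvider type:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

type myProvider struct{}

func (myProvider) Retrieve(context.Context) (aws.Credentials, error) {
	return aws.Credentials{}, nil
}

func main() {
	// Pointer-ness is normalized before the types are compared, so a
	// value and a pointer of the same type match.
	fmt.Println(aws.IsCredentialsProvider(myProvider{}, &myProvider{})) // true

	// Different implementation types do not match.
	fmt.Println(aws.IsCredentialsProvider(myProvider{}, aws.AnonymousCredentials{})) // false
}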
- // - // See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout - TLSNegotiationTimeout *time.Duration -} - -// GetConnectTimeout returns the ConnectTimeout value, returns false if the value is not set. -func (c *Configuration) GetConnectTimeout() (time.Duration, bool) { - if c.ConnectTimeout == nil { - return 0, false - } - return *c.ConnectTimeout, true -} - -// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, returns false if the value is not set. -func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) { - if c.TLSNegotiationTimeout == nil { - return 0, false - } - return *c.TLSNegotiationTimeout, true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go deleted file mode 100644 index dbaa873dc8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT. - -package defaults - -import ( - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "time" -) - -// GetModeConfiguration returns the default Configuration descriptor for the given mode. -// -// Supports the following modes: cross-region, in-region, mobile, standard -func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) { - var mv aws.DefaultsMode - mv.SetFromString(string(mode)) - - switch mv { - case aws.DefaultsModeCrossRegion: - settings := Configuration{ - ConnectTimeout: aws.Duration(3100 * time.Millisecond), - RetryMode: aws.RetryMode("standard"), - TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond), - } - return settings, nil - case aws.DefaultsModeInRegion: - settings := Configuration{ - ConnectTimeout: aws.Duration(1100 * time.Millisecond), - RetryMode: aws.RetryMode("standard"), - TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond), - } - return settings, nil - case aws.DefaultsModeMobile: - settings := Configuration{ - ConnectTimeout: aws.Duration(30000 * time.Millisecond), - RetryMode: aws.RetryMode("standard"), - TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond), - } - return settings, nil - case aws.DefaultsModeStandard: - settings := Configuration{ - ConnectTimeout: aws.Duration(3100 * time.Millisecond), - RetryMode: aws.RetryMode("standard"), - TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond), - } - return settings, nil - default: - return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go deleted file mode 100644 index 2d90011b42..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package defaults provides recommended configuration values for AWS SDKs and CLIs. -package defaults diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go deleted file mode 100644 index fcf9387c28..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT. - -package aws - -import ( - "strings" -) - -// DefaultsMode is the SDK defaults mode setting. 
-type DefaultsMode string - -// The DefaultsMode constants. -const ( - // DefaultsModeAuto is an experimental mode that builds on the standard mode. - // The SDK will attempt to discover the execution environment to determine the - // appropriate settings automatically. - // - // Note that the auto detection is heuristics-based and does not guarantee 100% - // accuracy. STANDARD mode will be used if the execution environment cannot - // be determined. The auto detection might query EC2 Instance Metadata service - // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html), - // which might introduce latency. Therefore we recommend choosing an explicit - // defaults_mode instead if startup latency is critical to your application - DefaultsModeAuto DefaultsMode = "auto" - - // DefaultsModeCrossRegion builds on the standard mode and includes optimization - // tailored for applications which call AWS services in a different region - // - // Note that the default values vended from this mode might change as best practices - // may evolve. As a result, it is encouraged to perform tests when upgrading - // the SDK - DefaultsModeCrossRegion DefaultsMode = "cross-region" - - // DefaultsModeInRegion builds on the standard mode and includes optimization - // tailored for applications which call AWS services from within the same AWS - // region - // - // Note that the default values vended from this mode might change as best practices - // may evolve. As a result, it is encouraged to perform tests when upgrading - // the SDK - DefaultsModeInRegion DefaultsMode = "in-region" - - // DefaultsModeLegacy provides default settings that vary per SDK and were used - // prior to establishment of defaults_mode - DefaultsModeLegacy DefaultsMode = "legacy" - - // DefaultsModeMobile builds on the standard mode and includes optimization - // tailored for mobile applications - // - // Note that the default values vended from this mode might change as best practices - // may evolve. As a result, it is encouraged to perform tests when upgrading - // the SDK - DefaultsModeMobile DefaultsMode = "mobile" - - // DefaultsModeStandard provides the latest recommended default values that - // should be safe to run in most scenarios - // - // Note that the default values vended from this mode might change as best practices - // may evolve. As a result, it is encouraged to perform tests when upgrading - // the SDK - DefaultsModeStandard DefaultsMode = "standard" -) - -// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches -// the provided string when compared using EqualFold. If the value does not match a known -// constant it will be set to as-is and the function will return false. As a special case, if the -// provided value is a zero-length string, the mode will be set to LegacyDefaultsMode. 
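Tying the pieces of this hunk together: SetFromString (just below) normalizes a mode string case-insensitively, and defaults.GetModeConfiguration (earlier in this diff) maps the resolved mode to concrete timeouts. A sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/defaults"
)

func main() {
	var mode aws.DefaultsMode
	ok := mode.SetFromString("In-Region") // matched case-insensitively
	fmt.Println(mode, ok)                 // in-region true

	cfg, err := defaults.GetModeConfiguration(mode)
	if err != nil {
		fmt.Println("unsupported mode:", err)
		return
	}
	if d, ok := cfg.GetConnectTimeout(); ok {
		fmt.Println(d) // 1.1s for in-region, per the generated table above
	}
}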
-func (d *DefaultsMode) SetFromString(v string) (ok bool) { - switch { - case strings.EqualFold(v, string(DefaultsModeAuto)): - *d = DefaultsModeAuto - ok = true - case strings.EqualFold(v, string(DefaultsModeCrossRegion)): - *d = DefaultsModeCrossRegion - ok = true - case strings.EqualFold(v, string(DefaultsModeInRegion)): - *d = DefaultsModeInRegion - ok = true - case strings.EqualFold(v, string(DefaultsModeLegacy)): - *d = DefaultsModeLegacy - ok = true - case strings.EqualFold(v, string(DefaultsModeMobile)): - *d = DefaultsModeMobile - ok = true - case strings.EqualFold(v, string(DefaultsModeStandard)): - *d = DefaultsModeStandard - ok = true - case len(v) == 0: - *d = DefaultsModeLegacy - ok = true - default: - *d = DefaultsMode(v) - } - return ok -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go deleted file mode 100644 index d8b6e09e59..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go +++ /dev/null @@ -1,62 +0,0 @@ -// Package aws provides the core SDK's utilities and shared types. Use this package's -// utilities to simplify setting and reading API operations parameters. -// -// # Value and Pointer Conversion Utilities -// -// This package includes a helper conversion utility for each scalar type the SDK's -// API use. These utilities make getting a pointer of the scalar, and dereferencing -// a pointer easier. -// -// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. -// The Pointer to value will safely dereference the pointer and return its value. -// If the pointer was nil, the scalar's zero value will be returned. -// -// The value to pointer functions will be named after the scalar type. So get a -// *string from a string value use the "String" function. This makes it easy to -// to get pointer of a literal string value, because getting the address of a -// literal requires assigning the value to a variable first. -// -// var strPtr *string -// -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str -// -// // With the SDK's conversion functions -// strPtr = aws.String("my string") -// -// // Convert *string to string value -// str = aws.ToString(strPtr) -// -// In addition to scalars the aws package also includes conversion utilities for -// map and slice for commonly types used in API parameters. The map and slice -// conversion functions use similar naming pattern as the scalar conversion -// functions. -// -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} -// -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) -// -// // Convert []*string to []string -// strs = aws.ToStringSlice(strPtrs) -// -// # SDK Default HTTP Client -// -// The SDK will use the http.DefaultClient if a HTTP client is not provided to -// the SDK's Session, or service client constructor. This means that if the -// http.DefaultClient is modified by other components of your application the -// modifications will be picked up by the SDK as well. -// -// In some cases this might be intended, but it is a better practice to create -// a custom HTTP Client to share explicitly through your application. You can -// configure the SDK to use the custom HTTP Client by setting the HTTPClient -// value of the SDK's Config type when creating a Session or service client. 
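The conversion helpers described in this package comment are simple but save a temporary variable at every call site. A quick sketch of the naming pattern:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// Value -> pointer, without assigning the literal to a variable first.
	namePtr := aws.String("example-bucket")

	// Pointer -> value; a nil pointer yields the zero value, not a panic.
	fmt.Println(aws.ToString(namePtr)) // example-bucket
	var nilPtr *string
	fmt.Println(aws.ToString(nilPtr) == "") // true

	// Slice helpers follow the same To-prefixed naming pattern.
	ptrs := aws.StringSlice([]string{"Go", "Gophers"})
	fmt.Println(aws.ToStringSlice(ptrs)) // [Go Gophers]
}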
-package aws - -// generate.go uses a build tag of "ignore", go run doesn't need to specify -// this because go run ignores all build flags when running a go file directly. -//go:generate go run -tags codegen generate.go -//go:generate go run -tags codegen logging_generate.go -//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go deleted file mode 100644 index aa10a9b40f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go +++ /dev/null @@ -1,229 +0,0 @@ -package aws - -import ( - "fmt" -) - -// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior. -type DualStackEndpointState uint - -const ( - // DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint resolution. - DualStackEndpointStateUnset DualStackEndpointState = iota - - // DualStackEndpointStateEnabled enables dual-stack endpoint resolution for service endpoints. - DualStackEndpointStateEnabled - - // DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints. - DualStackEndpointStateDisabled -) - -// GetUseDualStackEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value. -// Returns boolean false if the provided options does not have a method to retrieve the DualStackEndpointState. -func GetUseDualStackEndpoint(options ...interface{}) (value DualStackEndpointState, found bool) { - type iface interface { - GetUseDualStackEndpoint() DualStackEndpointState - } - for _, option := range options { - if i, ok := option.(iface); ok { - value = i.GetUseDualStackEndpoint() - found = true - break - } - } - return value, found -} - -// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior. -type FIPSEndpointState uint - -const ( - // FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution. - FIPSEndpointStateUnset FIPSEndpointState = iota - - // FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints. - FIPSEndpointStateEnabled - - // FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints. - FIPSEndpointStateDisabled -) - -// GetUseFIPSEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value. -// Returns boolean false if the provided options does not have a method to retrieve the DualStackEndpointState. -func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found bool) { - type iface interface { - GetUseFIPSEndpoint() FIPSEndpointState - } - for _, option := range options { - if i, ok := option.(iface); ok { - value = i.GetUseFIPSEndpoint() - found = true - break - } - } - return value, found -} - -// Endpoint represents the endpoint a service client should make API operation -// calls to. -// -// The SDK will automatically resolve these endpoints per API client using an -// internal endpoint resolvers. If you'd like to provide custom endpoint -// resolving behavior you can implement the EndpointResolver interface. -type Endpoint struct { - // The base URL endpoint the SDK API clients will use to make API calls to. - // The SDK will suffix URI path and query elements to this endpoint. - URL string - - // Specifies if the endpoint's hostname can be modified by the SDK's API - // client. - // - // If the hostname is mutable the SDK API clients may modify any part of - // the hostname based on the requirements of the API, (e.g. 
adding or - // removing content in the hostname), such as the Amazon S3 API client - // prefixing "bucketname" to the hostname, or changing the - // hostname service name component from "s3." to "s3-accesspoint.dualstack." - // for the dualstack endpoint of an S3 Accesspoint resource. - // - // Care should be taken when providing a custom endpoint for an API. If the - // endpoint hostname is mutable, and the client cannot modify the endpoint - // correctly, the operation call will most likely fail, or have undefined - // behavior. - // - // If the hostname is immutable, the SDK API clients will not modify the - // hostname of the URL. This may cause the API client not to function - // correctly if the API requires the operation-specific hostname values - // to be used by the client. - // - // This flag does not modify the API client's behavior if this endpoint - // will be used instead of Endpoint Discovery, or if the endpoint will be - // used to perform Endpoint Discovery. That behavior is configured via the - // API Client's Options. - HostnameImmutable bool - - // The AWS partition the endpoint belongs to. - PartitionID string - - // The service name that should be used for signing the requests to the - // endpoint. - SigningName string - - // The region that should be used for signing the request to the endpoint. - SigningRegion string - - // The signing method that should be used for signing the requests to the - // endpoint. - SigningMethod string - - // The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata. - // When providing a custom endpoint, you should set the source as EndpointSourceCustom. - // If source is not provided when providing a custom endpoint, the SDK may not - // perform required host mutations correctly. Source should be used along with - // the HostnameImmutable property as per the usage requirement. - Source EndpointSource -} - -// EndpointSource is the endpoint source type. -type EndpointSource int - -const ( - // EndpointSourceServiceMetadata denotes service modeled endpoint metadata is used as Endpoint Source. - EndpointSourceServiceMetadata EndpointSource = iota - - // EndpointSourceCustom denotes endpoint is a custom endpoint. This source should be used when - // the user provides a custom endpoint to be used by the SDK. - EndpointSourceCustom -) - -// EndpointNotFoundError is a sentinel error to indicate that the -// EndpointResolver implementation was unable to resolve an endpoint for the -// given service and region. Resolvers should use this to indicate that an API -// client should fall back and attempt to use its internal default resolver to -// resolve the endpoint. -type EndpointNotFoundError struct { - Err error -} - -// Error is the error message. -func (e *EndpointNotFoundError) Error() string { - return fmt.Sprintf("endpoint not found, %v", e.Err) -} - -// Unwrap returns the underlying error. -func (e *EndpointNotFoundError) Unwrap() error { - return e.Err -} - -// EndpointResolver is an endpoint resolver that can be used to provide or -// override an endpoint for the given service and region. API clients will -// attempt to use the EndpointResolver first to resolve an endpoint if -// available. If the EndpointResolver returns an EndpointNotFoundError, -// API clients will fall back to attempting to resolve the endpoint using its -// internal default endpoint resolver.
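EndpointNotFoundError is a sentinel-by-type: resolvers wrap a cause, and callers detect it with errors.As to decide whether to fall back. A minimal sketch of the detection side (the wrapped cause here is hypothetical):

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// A resolver error annotated somewhere along the call chain.
	err := fmt.Errorf("resolve endpoint: %w",
		&aws.EndpointNotFoundError{Err: errors.New("no override configured")})

	// errors.As walks the wrap chain, so the sentinel is still found.
	var nf *aws.EndpointNotFoundError
	if errors.As(err, &nf) {
		fmt.Println("falling back to the default resolver:", nf.Unwrap())
	}
}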
-// -// Deprecated: See EndpointResolverWithOptions -type EndpointResolver interface { - ResolveEndpoint(service, region string) (Endpoint, error) -} - -// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface. -// -// Deprecated: See EndpointResolverWithOptionsFunc -type EndpointResolverFunc func(service, region string) (Endpoint, error) - -// ResolveEndpoint calls the wrapped function and returns the results. -// -// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint -func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) { - return e(service, region) -} - -// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or -// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will -// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if -// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError, -// API clients will fall back to attempting to resolve the endpoint using its -// internal default endpoint resolver. -type EndpointResolverWithOptions interface { - ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) -} - -// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface. -type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error) - -// ResolveEndpoint calls the wrapped function and returns the results. -func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) { - return e(service, region, options...) -} - -// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value. -// Returns boolean false if the provided options do not have a method to retrieve the DisableHTTPS. -func GetDisableHTTPS(options ...interface{}) (value bool, found bool) { - type iface interface { - GetDisableHTTPS() bool - } - for _, option := range options { - if i, ok := option.(iface); ok { - value = i.GetDisableHTTPS() - found = true - break - } - } - return value, found -} - -// GetResolvedRegion takes a service's EndpointResolverOptions and returns the ResolvedRegion value. -// Returns boolean false if the provided options do not have a method to retrieve the ResolvedRegion. -func GetResolvedRegion(options ...interface{}) (value string, found bool) { - type iface interface { - GetResolvedRegion() string - } - for _, option := range options { - if i, ok := option.(iface); ok { - value = i.GetResolvedRegion() - found = true - break - } - } - return value, found -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go deleted file mode 100644 index f390a08f9f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go +++ /dev/null @@ -1,9 +0,0 @@ -package aws - -// MissingRegionError is an error that is returned if the region configuration -// value was not found.
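These (since-deprecated) hooks are how callers used to pin endpoints: wrap a function as an EndpointResolverWithOptionsFunc and return EndpointNotFoundError for anything you don't handle, so the client falls back to its default resolver. A minimal sketch (the localhost URL and service ID are illustrative only):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	resolver := aws.EndpointResolverWithOptionsFunc(
		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
			if service == "S3" {
				return aws.Endpoint{
					URL:               "http://localhost:9000", // e.g. a local S3-compatible store
					HostnameImmutable: true,
					Source:            aws.EndpointSourceCustom,
				}, nil
			}
			// Anything else: tell the client to use its default resolver.
			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
		})

	ep, err := resolver.ResolveEndpoint("S3", "us-east-1")
	fmt.Println(ep.URL, err) // http://localhost:9000 <nil>
}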
-type MissingRegionError struct{} - -func (*MissingRegionError) Error() string { - return "an AWS region is required, but was not found" -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go deleted file mode 100644 index 2394418e9b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go +++ /dev/null @@ -1,365 +0,0 @@ -// Code generated by aws/generate.go DO NOT EDIT. - -package aws - -import ( - "github.com/aws/smithy-go/ptr" - "time" -) - -// ToBool returns bool value dereferenced if the passed -// in pointer was not nil. Returns a bool zero value if the -// pointer was nil. -func ToBool(p *bool) (v bool) { - return ptr.ToBool(p) -} - -// ToBoolSlice returns a slice of bool values, that are -// dereferenced if the passed in pointer was not nil. Returns a bool -// zero value if the pointer was nil. -func ToBoolSlice(vs []*bool) []bool { - return ptr.ToBoolSlice(vs) -} - -// ToBoolMap returns a map of bool values, that are -// dereferenced if the passed in pointer was not nil. The bool -// zero value is used if the pointer was nil. -func ToBoolMap(vs map[string]*bool) map[string]bool { - return ptr.ToBoolMap(vs) -} - -// ToByte returns byte value dereferenced if the passed -// in pointer was not nil. Returns a byte zero value if the -// pointer was nil. -func ToByte(p *byte) (v byte) { - return ptr.ToByte(p) -} - -// ToByteSlice returns a slice of byte values, that are -// dereferenced if the passed in pointer was not nil. Returns a byte -// zero value if the pointer was nil. -func ToByteSlice(vs []*byte) []byte { - return ptr.ToByteSlice(vs) -} - -// ToByteMap returns a map of byte values, that are -// dereferenced if the passed in pointer was not nil. The byte -// zero value is used if the pointer was nil. -func ToByteMap(vs map[string]*byte) map[string]byte { - return ptr.ToByteMap(vs) -} - -// ToString returns string value dereferenced if the passed -// in pointer was not nil. Returns a string zero value if the -// pointer was nil. -func ToString(p *string) (v string) { - return ptr.ToString(p) -} - -// ToStringSlice returns a slice of string values, that are -// dereferenced if the passed in pointer was not nil. Returns a string -// zero value if the pointer was nil. -func ToStringSlice(vs []*string) []string { - return ptr.ToStringSlice(vs) -} - -// ToStringMap returns a map of string values, that are -// dereferenced if the passed in pointer was not nil. The string -// zero value is used if the pointer was nil. -func ToStringMap(vs map[string]*string) map[string]string { - return ptr.ToStringMap(vs) -} - -// ToInt returns int value dereferenced if the passed -// in pointer was not nil. Returns a int zero value if the -// pointer was nil. -func ToInt(p *int) (v int) { - return ptr.ToInt(p) -} - -// ToIntSlice returns a slice of int values, that are -// dereferenced if the passed in pointer was not nil. Returns a int -// zero value if the pointer was nil. -func ToIntSlice(vs []*int) []int { - return ptr.ToIntSlice(vs) -} - -// ToIntMap returns a map of int values, that are -// dereferenced if the passed in pointer was not nil. The int -// zero value is used if the pointer was nil. -func ToIntMap(vs map[string]*int) map[string]int { - return ptr.ToIntMap(vs) -} - -// ToInt8 returns int8 value dereferenced if the passed -// in pointer was not nil. Returns a int8 zero value if the -// pointer was nil. 
-func ToInt8(p *int8) (v int8) { - return ptr.ToInt8(p) -} - -// ToInt8Slice returns a slice of int8 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int8 -// zero value if the pointer was nil. -func ToInt8Slice(vs []*int8) []int8 { - return ptr.ToInt8Slice(vs) -} - -// ToInt8Map returns a map of int8 values, that are -// dereferenced if the passed in pointer was not nil. The int8 -// zero value is used if the pointer was nil. -func ToInt8Map(vs map[string]*int8) map[string]int8 { - return ptr.ToInt8Map(vs) -} - -// ToInt16 returns int16 value dereferenced if the passed -// in pointer was not nil. Returns a int16 zero value if the -// pointer was nil. -func ToInt16(p *int16) (v int16) { - return ptr.ToInt16(p) -} - -// ToInt16Slice returns a slice of int16 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int16 -// zero value if the pointer was nil. -func ToInt16Slice(vs []*int16) []int16 { - return ptr.ToInt16Slice(vs) -} - -// ToInt16Map returns a map of int16 values, that are -// dereferenced if the passed in pointer was not nil. The int16 -// zero value is used if the pointer was nil. -func ToInt16Map(vs map[string]*int16) map[string]int16 { - return ptr.ToInt16Map(vs) -} - -// ToInt32 returns int32 value dereferenced if the passed -// in pointer was not nil. Returns a int32 zero value if the -// pointer was nil. -func ToInt32(p *int32) (v int32) { - return ptr.ToInt32(p) -} - -// ToInt32Slice returns a slice of int32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int32 -// zero value if the pointer was nil. -func ToInt32Slice(vs []*int32) []int32 { - return ptr.ToInt32Slice(vs) -} - -// ToInt32Map returns a map of int32 values, that are -// dereferenced if the passed in pointer was not nil. The int32 -// zero value is used if the pointer was nil. -func ToInt32Map(vs map[string]*int32) map[string]int32 { - return ptr.ToInt32Map(vs) -} - -// ToInt64 returns int64 value dereferenced if the passed -// in pointer was not nil. Returns a int64 zero value if the -// pointer was nil. -func ToInt64(p *int64) (v int64) { - return ptr.ToInt64(p) -} - -// ToInt64Slice returns a slice of int64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int64 -// zero value if the pointer was nil. -func ToInt64Slice(vs []*int64) []int64 { - return ptr.ToInt64Slice(vs) -} - -// ToInt64Map returns a map of int64 values, that are -// dereferenced if the passed in pointer was not nil. The int64 -// zero value is used if the pointer was nil. -func ToInt64Map(vs map[string]*int64) map[string]int64 { - return ptr.ToInt64Map(vs) -} - -// ToUint returns uint value dereferenced if the passed -// in pointer was not nil. Returns a uint zero value if the -// pointer was nil. -func ToUint(p *uint) (v uint) { - return ptr.ToUint(p) -} - -// ToUintSlice returns a slice of uint values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint -// zero value if the pointer was nil. -func ToUintSlice(vs []*uint) []uint { - return ptr.ToUintSlice(vs) -} - -// ToUintMap returns a map of uint values, that are -// dereferenced if the passed in pointer was not nil. The uint -// zero value is used if the pointer was nil. -func ToUintMap(vs map[string]*uint) map[string]uint { - return ptr.ToUintMap(vs) -} - -// ToUint8 returns uint8 value dereferenced if the passed -// in pointer was not nil. Returns a uint8 zero value if the -// pointer was nil. 
-func ToUint8(p *uint8) (v uint8) { - return ptr.ToUint8(p) -} - -// ToUint8Slice returns a slice of uint8 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint8 -// zero value if the pointer was nil. -func ToUint8Slice(vs []*uint8) []uint8 { - return ptr.ToUint8Slice(vs) -} - -// ToUint8Map returns a map of uint8 values, that are -// dereferenced if the passed in pointer was not nil. The uint8 -// zero value is used if the pointer was nil. -func ToUint8Map(vs map[string]*uint8) map[string]uint8 { - return ptr.ToUint8Map(vs) -} - -// ToUint16 returns uint16 value dereferenced if the passed -// in pointer was not nil. Returns a uint16 zero value if the -// pointer was nil. -func ToUint16(p *uint16) (v uint16) { - return ptr.ToUint16(p) -} - -// ToUint16Slice returns a slice of uint16 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint16 -// zero value if the pointer was nil. -func ToUint16Slice(vs []*uint16) []uint16 { - return ptr.ToUint16Slice(vs) -} - -// ToUint16Map returns a map of uint16 values, that are -// dereferenced if the passed in pointer was not nil. The uint16 -// zero value is used if the pointer was nil. -func ToUint16Map(vs map[string]*uint16) map[string]uint16 { - return ptr.ToUint16Map(vs) -} - -// ToUint32 returns uint32 value dereferenced if the passed -// in pointer was not nil. Returns a uint32 zero value if the -// pointer was nil. -func ToUint32(p *uint32) (v uint32) { - return ptr.ToUint32(p) -} - -// ToUint32Slice returns a slice of uint32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint32 -// zero value if the pointer was nil. -func ToUint32Slice(vs []*uint32) []uint32 { - return ptr.ToUint32Slice(vs) -} - -// ToUint32Map returns a map of uint32 values, that are -// dereferenced if the passed in pointer was not nil. The uint32 -// zero value is used if the pointer was nil. -func ToUint32Map(vs map[string]*uint32) map[string]uint32 { - return ptr.ToUint32Map(vs) -} - -// ToUint64 returns uint64 value dereferenced if the passed -// in pointer was not nil. Returns a uint64 zero value if the -// pointer was nil. -func ToUint64(p *uint64) (v uint64) { - return ptr.ToUint64(p) -} - -// ToUint64Slice returns a slice of uint64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint64 -// zero value if the pointer was nil. -func ToUint64Slice(vs []*uint64) []uint64 { - return ptr.ToUint64Slice(vs) -} - -// ToUint64Map returns a map of uint64 values, that are -// dereferenced if the passed in pointer was not nil. The uint64 -// zero value is used if the pointer was nil. -func ToUint64Map(vs map[string]*uint64) map[string]uint64 { - return ptr.ToUint64Map(vs) -} - -// ToFloat32 returns float32 value dereferenced if the passed -// in pointer was not nil. Returns a float32 zero value if the -// pointer was nil. -func ToFloat32(p *float32) (v float32) { - return ptr.ToFloat32(p) -} - -// ToFloat32Slice returns a slice of float32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a float32 -// zero value if the pointer was nil. -func ToFloat32Slice(vs []*float32) []float32 { - return ptr.ToFloat32Slice(vs) -} - -// ToFloat32Map returns a map of float32 values, that are -// dereferenced if the passed in pointer was not nil. The float32 -// zero value is used if the pointer was nil. 
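The From-pointer helpers on either side of this point are mechanical per-type expansions generated before Go had type parameters. Under Go 1.18+ the same dereference-or-zero behavior can be written once; a standalone sketch, not part of the SDK:

package main

import "fmt"

// deref returns the pointed-to value, or the type's zero value for nil,
// mirroring the behavior of the generated To* helpers.
func deref[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

func main() {
	s := "gopher"
	fmt.Println(deref(&s))          // gopher
	fmt.Println(deref[string](nil)) // ""
	fmt.Println(deref[int](nil))    // 0
}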
-func ToFloat32Map(vs map[string]*float32) map[string]float32 { - return ptr.ToFloat32Map(vs) -} - -// ToFloat64 returns float64 value dereferenced if the passed -// in pointer was not nil. Returns a float64 zero value if the -// pointer was nil. -func ToFloat64(p *float64) (v float64) { - return ptr.ToFloat64(p) -} - -// ToFloat64Slice returns a slice of float64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a float64 -// zero value if the pointer was nil. -func ToFloat64Slice(vs []*float64) []float64 { - return ptr.ToFloat64Slice(vs) -} - -// ToFloat64Map returns a map of float64 values, that are -// dereferenced if the passed in pointer was not nil. The float64 -// zero value is used if the pointer was nil. -func ToFloat64Map(vs map[string]*float64) map[string]float64 { - return ptr.ToFloat64Map(vs) -} - -// ToTime returns time.Time value dereferenced if the passed -// in pointer was not nil. Returns a time.Time zero value if the -// pointer was nil. -func ToTime(p *time.Time) (v time.Time) { - return ptr.ToTime(p) -} - -// ToTimeSlice returns a slice of time.Time values, that are -// dereferenced if the passed in pointer was not nil. Returns a time.Time -// zero value if the pointer was nil. -func ToTimeSlice(vs []*time.Time) []time.Time { - return ptr.ToTimeSlice(vs) -} - -// ToTimeMap returns a map of time.Time values, that are -// dereferenced if the passed in pointer was not nil. The time.Time -// zero value is used if the pointer was nil. -func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { - return ptr.ToTimeMap(vs) -} - -// ToDuration returns time.Duration value dereferenced if the passed -// in pointer was not nil. Returns a time.Duration zero value if the -// pointer was nil. -func ToDuration(p *time.Duration) (v time.Duration) { - return ptr.ToDuration(p) -} - -// ToDurationSlice returns a slice of time.Duration values, that are -// dereferenced if the passed in pointer was not nil. Returns a time.Duration -// zero value if the pointer was nil. -func ToDurationSlice(vs []*time.Duration) []time.Duration { - return ptr.ToDurationSlice(vs) -} - -// ToDurationMap returns a map of time.Duration values, that are -// dereferenced if the passed in pointer was not nil. The time.Duration -// zero value is used if the pointer was nil. -func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { - return ptr.ToDurationMap(vs) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go deleted file mode 100644 index 847cc51a98..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package aws - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.26.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go deleted file mode 100644 index 91c94d987b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go +++ /dev/null @@ -1,119 +0,0 @@ -// Code generated by aws/logging_generate.go DO NOT EDIT. - -package aws - -// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where -// each bit is a flag that describes the logging behavior for one or more client components. 
-// The entire 64-bit group is reserved for later expansion by the SDK. -// -// Example: Setting ClientLogMode to enable logging of retries and requests -// -// clientLogMode := aws.LogRetries | aws.LogRequest -// -// Example: Adding an additional log mode to an existing ClientLogMode value -// -// clientLogMode |= aws.LogResponse -type ClientLogMode uint64 - -// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. -const ( - LogSigning ClientLogMode = 1 << (64 - 1 - iota) - LogRetries - LogRequest - LogRequestWithBody - LogResponse - LogResponseWithBody - LogDeprecatedUsage - LogRequestEventMessage - LogResponseEventMessage -) - -// IsSigning returns whether the Signing logging mode bit is set -func (m ClientLogMode) IsSigning() bool { - return m&LogSigning != 0 -} - -// IsRetries returns whether the Retries logging mode bit is set -func (m ClientLogMode) IsRetries() bool { - return m&LogRetries != 0 -} - -// IsRequest returns whether the Request logging mode bit is set -func (m ClientLogMode) IsRequest() bool { - return m&LogRequest != 0 -} - -// IsRequestWithBody returns whether the RequestWithBody logging mode bit is set -func (m ClientLogMode) IsRequestWithBody() bool { - return m&LogRequestWithBody != 0 -} - -// IsResponse returns whether the Response logging mode bit is set -func (m ClientLogMode) IsResponse() bool { - return m&LogResponse != 0 -} - -// IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set -func (m ClientLogMode) IsResponseWithBody() bool { - return m&LogResponseWithBody != 0 -} - -// IsDeprecatedUsage returns whether the DeprecatedUsage logging mode bit is set -func (m ClientLogMode) IsDeprecatedUsage() bool { - return m&LogDeprecatedUsage != 0 -} - -// IsRequestEventMessage returns whether the RequestEventMessage logging mode bit is set -func (m ClientLogMode) IsRequestEventMessage() bool { - return m&LogRequestEventMessage != 0 -} - -// IsResponseEventMessage returns whether the ResponseEventMessage logging mode bit is set -func (m ClientLogMode) IsResponseEventMessage() bool { - return m&LogResponseEventMessage != 0 -} - -// ClearSigning clears the Signing logging mode bit -func (m *ClientLogMode) ClearSigning() { - *m &^= LogSigning -} - -// ClearRetries clears the Retries logging mode bit -func (m *ClientLogMode) ClearRetries() { - *m &^= LogRetries -} - -// ClearRequest clears the Request logging mode bit -func (m *ClientLogMode) ClearRequest() { - *m &^= LogRequest -} - -// ClearRequestWithBody clears the RequestWithBody logging mode bit -func (m *ClientLogMode) ClearRequestWithBody() { - *m &^= LogRequestWithBody -} - -// ClearResponse clears the Response logging mode bit -func (m *ClientLogMode) ClearResponse() { - *m &^= LogResponse -} - -// ClearResponseWithBody clears the ResponseWithBody logging mode bit -func (m *ClientLogMode) ClearResponseWithBody() { - *m &^= LogResponseWithBody -} - -// ClearDeprecatedUsage clears the DeprecatedUsage logging mode bit -func (m *ClientLogMode) ClearDeprecatedUsage() { - *m &^= LogDeprecatedUsage -} - -// ClearRequestEventMessage clears the RequestEventMessage logging mode bit -func (m *ClientLogMode) ClearRequestEventMessage() { - *m &^= LogRequestEventMessage -} - -// ClearResponseEventMessage clears the ResponseEventMessage logging mode bit -func (m *ClientLogMode) ClearResponseEventMessage() { - *m &^= LogResponseEventMessage -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go 
b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go deleted file mode 100644 index 6ecc2231a1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go +++ /dev/null @@ -1,95 +0,0 @@ -//go:build clientlogmode -// +build clientlogmode - -package main - -import ( - "fmt" - "log" - "os" - "strings" - "text/template" -) - -var config = struct { - ModeBits []string -}{ - // Items should be appended only to keep bit-flag positions stable - ModeBits: []string{ - "Signing", - "Retries", - "Request", - "RequestWithBody", - "Response", - "ResponseWithBody", - "DeprecatedUsage", - "RequestEventMessage", - "ResponseEventMessage", - }, -} - -func bitName(name string) string { - return strings.ToUpper(name[:1]) + name[1:] -} - -var tmpl = template.Must(template.New("ClientLogMode").Funcs(map[string]interface{}{ - "symbolName": func(name string) string { - return "Log" + bitName(name) - }, - "bitName": bitName, -}).Parse(`// Code generated by aws/logging_generate.go DO NOT EDIT. - -package aws - -// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where -// each bit is a flag that describes the logging behavior for one or more client components. -// The entire 64-bit group is reserved for later expansion by the SDK. -// -// Example: Setting ClientLogMode to enable logging of retries and requests -// clientLogMode := aws.LogRetries | aws.LogRequest -// -// Example: Adding an additional log mode to an existing ClientLogMode value -// clientLogMode |= aws.LogResponse -type ClientLogMode uint64 - -// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. -const ( -{{- range $index, $field := .ModeBits }} - {{ (symbolName $field) }}{{- if (eq 0 $index) }} ClientLogMode = 1 << (64 - 1 - iota){{- end }} -{{- end }} -) -{{ range $_, $field := .ModeBits }} -// Is{{- bitName $field }} returns whether the {{ bitName $field }} logging mode bit is set -func (m ClientLogMode) Is{{- bitName $field }}() bool { - return m&{{- (symbolName $field) }} != 0 -} -{{ end }} -{{- range $_, $field := .ModeBits }} -// Clear{{- bitName $field }} clears the {{ bitName $field }} logging mode bit -func (m *ClientLogMode) Clear{{- bitName $field }}() { - *m &^= {{ (symbolName $field) }} -} -{{ end -}} -`)) - -func main() { - uniqueBitFields := make(map[string]struct{}) - - for _, bitName := range config.ModeBits { - if _, ok := uniqueBitFields[strings.ToLower(bitName)]; ok { - panic(fmt.Sprintf("duplicate bit field: %s", bitName)) - } - uniqueBitFields[bitName] = struct{}{} - } - - file, err := os.Create("logging.go") - if err != nil { - log.Fatal(err) - } - defer file.Close() - - err = tmpl.Execute(file, config) - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go deleted file mode 100644 index d66f0960aa..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go +++ /dev/null @@ -1,213 +0,0 @@ -package middleware - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/aws" - - "github.com/aws/smithy-go/middleware" -) - -// RegisterServiceMetadata registers metadata about the service and operation into the middleware context -// so that it is available at runtime for other middleware to introspect. 
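Stepping back to the logging API deleted just above: ClientLogMode is an ordinary uint64 bit set, and logging_generate.go is the go:generate program that emits its Is*/Clear* helpers. Exercising the generated API takes only a few lines (a sketch; in application code the composed value is typically assigned to aws.Config.ClientLogMode):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// Compose bits with |, test with the generated Is* helpers,
	// and clear with the generated Clear* helpers.
	mode := aws.LogRetries | aws.LogRequest
	mode |= aws.LogResponse

	fmt.Println(mode.IsRetries(), mode.IsSigning()) // true false

	mode.ClearRequest()
	fmt.Println(mode.IsRequest()) // false
}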
-type RegisterServiceMetadata struct { - ServiceID string - SigningName string - Region string - OperationName string -} - -// ID returns the middleware identifier. -func (s *RegisterServiceMetadata) ID() string { - return "RegisterServiceMetadata" -} - -// HandleInitialize registers service metadata information into the middleware context, allowing for introspection. -func (s RegisterServiceMetadata) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) { - if len(s.ServiceID) > 0 { - ctx = SetServiceID(ctx, s.ServiceID) - } - if len(s.SigningName) > 0 { - ctx = SetSigningName(ctx, s.SigningName) - } - if len(s.Region) > 0 { - ctx = setRegion(ctx, s.Region) - } - if len(s.OperationName) > 0 { - ctx = setOperationName(ctx, s.OperationName) - } - return next.HandleInitialize(ctx, in) -} - -// service metadata keys for storing and lookup of runtime stack information. -type ( - serviceIDKey struct{} - signingNameKey struct{} - signingRegionKey struct{} - regionKey struct{} - operationNameKey struct{} - partitionIDKey struct{} - requiresLegacyEndpointsKey struct{} -) - -// GetServiceID retrieves the service id from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetServiceID(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, serviceIDKey{}).(string) - return v -} - -// GetSigningName retrieves the service signing name from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -// -// Deprecated: This value is unstable. The resolved signing name is available -// in the signer properties object passed to the signer. -func GetSigningName(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string) - return v -} - -// GetSigningRegion retrieves the region from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -// -// Deprecated: This value is unstable. The resolved signing region is available -// in the signer properties object passed to the signer. -func GetSigningRegion(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string) - return v -} - -// GetRegion retrieves the endpoint region from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetRegion(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, regionKey{}).(string) - return v -} - -// GetOperationName retrieves the service operation metadata from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetOperationName(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, operationNameKey{}).(string) - return v -} - -// GetPartitionID retrieves the endpoint partition id from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. 
-func GetPartitionID(ctx context.Context) string { - v, _ := middleware.GetStackValue(ctx, partitionIDKey{}).(string) - return v -} - -// GetRequiresLegacyEndpoints the flag used to indicate if legacy endpoint -// customizations need to be executed. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetRequiresLegacyEndpoints(ctx context.Context) bool { - v, _ := middleware.GetStackValue(ctx, requiresLegacyEndpointsKey{}).(bool) - return v -} - -// SetRequiresLegacyEndpoints set or modifies the flag indicated that -// legacy endpoint customizations are needed. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetRequiresLegacyEndpoints(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, requiresLegacyEndpointsKey{}, value) -} - -// SetSigningName set or modifies the sigv4 or sigv4a signing name on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -// -// Deprecated: This value is unstable. Use WithSigV4SigningName client option -// funcs instead. -func SetSigningName(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, signingNameKey{}, value) -} - -// SetSigningRegion sets or modifies the region on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -// -// Deprecated: This value is unstable. Use WithSigV4SigningRegion client option -// funcs instead. -func SetSigningRegion(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, signingRegionKey{}, value) -} - -// SetServiceID sets the service id on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetServiceID(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, serviceIDKey{}, value) -} - -// setRegion sets the endpoint region on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setRegion(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, regionKey{}, value) -} - -// setOperationName sets the service operation on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setOperationName(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, operationNameKey{}, value) -} - -// SetPartitionID sets the partition id of a resolved region on the context -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. 
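Every accessor in this file repeats one idiom: an unexported empty-struct key plus a checked type assertion, so keys can never collide across packages and a missing value decays to the zero value instead of panicking. The same idiom with plain context values (the smithy stack values above behave analogously):

package main

import (
	"context"
	"fmt"
)

// Unexported empty-struct keys are unique per package and carry no data.
type regionKey struct{}

func setRegion(ctx context.Context, v string) context.Context {
	return context.WithValue(ctx, regionKey{}, v)
}

func getRegion(ctx context.Context) string {
	// The checked assertion turns a missing key into "" rather than a panic.
	v, _ := ctx.Value(regionKey{}).(string)
	return v
}

func main() {
	ctx := setRegion(context.Background(), "us-west-2")
	fmt.Println(getRegion(ctx))                  // us-west-2
	fmt.Println(getRegion(context.Background())) // ""
}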
-func SetPartitionID(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, partitionIDKey{}, value) -} - -// EndpointSource key -type endpointSourceKey struct{} - -// GetEndpointSource returns an endpoint source if set on context -func GetEndpointSource(ctx context.Context) (v aws.EndpointSource) { - v, _ = middleware.GetStackValue(ctx, endpointSourceKey{}).(aws.EndpointSource) - return v -} - -// SetEndpointSource sets endpoint source on context -func SetEndpointSource(ctx context.Context, value aws.EndpointSource) context.Context { - return middleware.WithStackValue(ctx, endpointSourceKey{}, value) -} - -type signingCredentialsKey struct{} - -// GetSigningCredentials returns the credentials that were used for signing if set on context. -func GetSigningCredentials(ctx context.Context) (v aws.Credentials) { - v, _ = middleware.GetStackValue(ctx, signingCredentialsKey{}).(aws.Credentials) - return v -} - -// SetSigningCredentials sets the credentials used for signing on the context. -func SetSigningCredentials(ctx context.Context, value aws.Credentials) context.Context { - return middleware.WithStackValue(ctx, signingCredentialsKey{}, value) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go deleted file mode 100644 index 6d5f0079c2..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go +++ /dev/null @@ -1,168 +0,0 @@ -package middleware - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/rand" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyrand "github.com/aws/smithy-go/rand" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ClientRequestID is a Smithy BuildMiddleware that will generate a unique ID for each logical API operation -// invocation. -type ClientRequestID struct{} - -// ID returns the identifier for the ClientRequestID middleware -func (r *ClientRequestID) ID() string { - return "ClientRequestID" -} - -// HandleBuild attaches a unique operation invocation ID to the request -func (r ClientRequestID) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", req) - } - - invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID() - if err != nil { - return out, metadata, err - } - - const invocationIDHeader = "Amz-Sdk-Invocation-Id" - req.Header[invocationIDHeader] = append(req.Header[invocationIDHeader][:0], invocationID) - - return next.HandleBuild(ctx, in) -} - -// RecordResponseTiming records the response timing for the SDK client requests.
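The invocation ID minted above is a v4 UUID created once per logical API call, so every retry attempt of one operation carries the same Amz-Sdk-Invocation-Id. The same helper can be exercised directly (a sketch, seeding from crypto/rand instead of the SDK's internal reader):

package main

import (
	"crypto/rand"
	"fmt"

	smithyrand "github.com/aws/smithy-go/rand"
)

func main() {
	// One UUID per operation invocation; retries reuse it.
	invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID()
	if err != nil {
		panic(err)
	}
	fmt.Println(invocationID) // a random v4 UUID string
}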
-type RecordResponseTiming struct{} - -// ID is the middleware identifier -func (a *RecordResponseTiming) ID() string { - return "RecordResponseTiming" -} - -// HandleDeserialize calculates response metadata and clock skew -func (a RecordResponseTiming) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - responseAt := sdk.NowTime() - setResponseAt(&metadata, responseAt) - - var serverTime time.Time - - switch resp := out.RawResponse.(type) { - case *smithyhttp.Response: - respDateHeader := resp.Header.Get("Date") - if len(respDateHeader) == 0 { - break - } - var parseErr error - serverTime, parseErr = smithyhttp.ParseTime(respDateHeader) - if parseErr != nil { - logger := middleware.GetLogger(ctx) - logger.Logf(logging.Warn, "failed to parse response Date header value, got %v", - parseErr.Error()) - break - } - setServerTime(&metadata, serverTime) - } - - if !serverTime.IsZero() { - attemptSkew := serverTime.Sub(responseAt) - setAttemptSkew(&metadata, attemptSkew) - } - - return out, metadata, err -} - -type responseAtKey struct{} - -// GetResponseAt returns the time response was received at. -func GetResponseAt(metadata middleware.Metadata) (v time.Time, ok bool) { - v, ok = metadata.Get(responseAtKey{}).(time.Time) - return v, ok -} - -// setResponseAt sets the response time on the metadata. -func setResponseAt(metadata *middleware.Metadata, v time.Time) { - metadata.Set(responseAtKey{}, v) -} - -type serverTimeKey struct{} - -// GetServerTime returns the server time for response. -func GetServerTime(metadata middleware.Metadata) (v time.Time, ok bool) { - v, ok = metadata.Get(serverTimeKey{}).(time.Time) - return v, ok -} - -// setServerTime sets the server time on the metadata. -func setServerTime(metadata *middleware.Metadata, v time.Time) { - metadata.Set(serverTimeKey{}, v) -} - -type attemptSkewKey struct{} - -// GetAttemptSkew returns Attempt clock skew for response from metadata. -func GetAttemptSkew(metadata middleware.Metadata) (v time.Duration, ok bool) { - v, ok = metadata.Get(attemptSkewKey{}).(time.Duration) - return v, ok -} - -// setAttemptSkew sets the attempt clock skew on the metadata. -func setAttemptSkew(metadata *middleware.Metadata, v time.Duration) { - metadata.Set(attemptSkewKey{}, v) -} - -// AddClientRequestIDMiddleware adds ClientRequestID to the middleware stack -func AddClientRequestIDMiddleware(stack *middleware.Stack) error { - return stack.Build.Add(&ClientRequestID{}, middleware.After) -} - -// AddRecordResponseTiming adds RecordResponseTiming middleware to the -// middleware stack. -func AddRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&RecordResponseTiming{}, middleware.After) -} - -// rawResponseKey is the accessor key used to store and access the -// raw response within the response metadata. 
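RecordResponseTiming's skew estimate is just the server's Date header minus the local receive time; a positive value means the server clock is ahead. The arithmetic, isolated from the middleware (a sketch using a stock net/http response):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// attemptSkew reports how far the server clock appears to be ahead of
// (positive) or behind (negative) the local clock when the response arrived.
func attemptSkew(resp *http.Response, receivedAt time.Time) (time.Duration, bool) {
	dateHeader := resp.Header.Get("Date")
	if dateHeader == "" {
		return 0, false
	}
	serverTime, err := http.ParseTime(dateHeader)
	if err != nil {
		return 0, false
	}
	return serverTime.Sub(receivedAt), true
}

func main() {
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Set("Date", time.Now().Add(2*time.Second).UTC().Format(http.TimeFormat))

	if skew, ok := attemptSkew(resp, time.Now()); ok {
		fmt.Println("server clock ahead by roughly", skew.Round(time.Second))
	}
}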
-type rawResponseKey struct{} - -// AddRawResponse is a middleware that adds the raw response to the metadata -type AddRawResponse struct{} - -// ID returns the identifier for the AddRawResponse middleware -func (m *AddRawResponse) ID() string { - return "AddRawResponseToMetadata" -} - -// HandleDeserialize adds the raw response to the middleware metadata -func (m AddRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - metadata.Set(rawResponseKey{}, out.RawResponse) - return out, metadata, err -} - -// AddRawResponseToMetadata adds middleware to the middleware stack that -// stores the raw response in the metadata. -func AddRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&AddRawResponse{}, middleware.Before) -} - -// GetRawResponse returns the raw response set on the metadata -func GetRawResponse(metadata middleware.Metadata) interface{} { - return metadata.Get(rawResponseKey{}) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go deleted file mode 100644 index ba262dadcd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package middleware - -import "runtime" - -func getNormalizedOSName() (os string) { - switch runtime.GOOS { - case "android": - os = "android" - case "linux": - os = "linux" - case "windows": - os = "windows" - case "darwin": - os = "macos" - case "ios": - os = "ios" - default: - os = "other" - } - return os -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go deleted file mode 100644 index e14a1e4ecb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !go1.16 -// +build !go1.16 - -package middleware - -import "runtime" - -func getNormalizedOSName() (os string) { - switch runtime.GOOS { - case "android": - os = "android" - case "linux": - os = "linux" - case "windows": - os = "windows" - case "darwin": - // Due to Apple M1 we can't distinguish between macOS and iOS when GOOS/GOARCH is darwin/amd64. - // For now, declare this as "other" until we have a better detection mechanism. - fallthrough - default: - os = "other" - } - return os -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go deleted file mode 100644 index b0133f4c88..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go +++ /dev/null @@ -1,319 +0,0 @@ -// Package metrics implements metrics gathering for SDK development purposes. -// -// This package is designated as private and is intended for use only by the -// AWS client runtime. The exported API therein is not considered stable and -// is subject to breaking changes without notice. -package metrics - -import ( - "context" - "encoding/json" - "fmt" - "sync" - "time" - - "github.com/aws/smithy-go/middleware" -) - -const ( - // ServiceIDKey is the key for the service ID metric. - ServiceIDKey = "ServiceId" - // OperationNameKey is the key for the operation name metric.
- OperationNameKey = "OperationName" - // ClientRequestIDKey is the key for the client request ID metric. - ClientRequestIDKey = "ClientRequestId" - // APICallDurationKey is the key for the API call duration metric. - APICallDurationKey = "ApiCallDuration" - // APICallSuccessfulKey is the key for the API call successful metric. - APICallSuccessfulKey = "ApiCallSuccessful" - // MarshallingDurationKey is the key for the marshalling duration metric. - MarshallingDurationKey = "MarshallingDuration" - // InThroughputKey is the key for the input throughput metric. - InThroughputKey = "InThroughput" - // OutThroughputKey is the key for the output throughput metric. - OutThroughputKey = "OutThroughput" - // RetryCountKey is the key for the retry count metric. - RetryCountKey = "RetryCount" - // HTTPStatusCodeKey is the key for the HTTP status code metric. - HTTPStatusCodeKey = "HttpStatusCode" - // AWSExtendedRequestIDKey is the key for the AWS extended request ID metric. - AWSExtendedRequestIDKey = "AwsExtendedRequestId" - // AWSRequestIDKey is the key for the AWS request ID metric. - AWSRequestIDKey = "AwsRequestId" - // BackoffDelayDurationKey is the key for the backoff delay duration metric. - BackoffDelayDurationKey = "BackoffDelayDuration" - // StreamThroughputKey is the key for the stream throughput metric. - StreamThroughputKey = "Throughput" - // ConcurrencyAcquireDurationKey is the key for the concurrency acquire duration metric. - ConcurrencyAcquireDurationKey = "ConcurrencyAcquireDuration" - // PendingConcurrencyAcquiresKey is the key for the pending concurrency acquires metric. - PendingConcurrencyAcquiresKey = "PendingConcurrencyAcquires" - // SigningDurationKey is the key for the signing duration metric. - SigningDurationKey = "SigningDuration" - // UnmarshallingDurationKey is the key for the unmarshalling duration metric. - UnmarshallingDurationKey = "UnmarshallingDuration" - // TimeToFirstByteKey is the key for the time to first byte metric. - TimeToFirstByteKey = "TimeToFirstByte" - // ServiceCallDurationKey is the key for the service call duration metric. - ServiceCallDurationKey = "ServiceCallDuration" - // EndpointResolutionDurationKey is the key for the endpoint resolution duration metric. - EndpointResolutionDurationKey = "EndpointResolutionDuration" - // AttemptNumberKey is the key for the attempt number metric. - AttemptNumberKey = "AttemptNumber" - // MaxConcurrencyKey is the key for the max concurrency metric. - MaxConcurrencyKey = "MaxConcurrency" - // AvailableConcurrencyKey is the key for the available concurrency metric. - AvailableConcurrencyKey = "AvailableConcurrency" -) - -// MetricPublisher provides the interface to provide custom MetricPublishers. -// PostRequestMetrics will be invoked by the MetricCollection middleware to post request. -// PostStreamMetrics will be invoked by ReadCloserWithMetrics to post stream metrics. -type MetricPublisher interface { - PostRequestMetrics(*MetricData) error - PostStreamMetrics(*MetricData) error -} - -// Serializer provides the interface to provide custom Serializers. -// Serialize will transform any input object in its corresponding string representation. -type Serializer interface { - Serialize(obj interface{}) (string, error) -} - -// DefaultSerializer is an implementation of the Serializer interface. -type DefaultSerializer struct{} - -// Serialize uses the default JSON serializer to obtain the string representation of an object. 
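MetricPublisher and Serializer form a small strategy pair: collection code hands a *MetricData to whatever publisher is installed, and the publisher decides the encoding and the sink. Since this private package is being dropped from the vendor tree, here is the shape of the pattern as a self-contained sketch (the types are local stand-ins, not the SDK's):

package main

import (
	"encoding/json"
	"fmt"
)

// Serializer and MetricPublisher mirror the deleted interfaces.
type Serializer interface {
	Serialize(obj interface{}) (string, error)
}

type MetricData struct {
	OperationName string
	RetryCount    int
}

type MetricPublisher interface {
	PostRequestMetrics(*MetricData) error
}

// DefaultSerializer encodes any value as JSON, like the deleted default.
type DefaultSerializer struct{}

func (DefaultSerializer) Serialize(obj interface{}) (string, error) {
	b, err := json.Marshal(obj)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

// stdoutPublisher is a trivial sink that prints each serialized record.
type stdoutPublisher struct{ ser Serializer }

func (p stdoutPublisher) PostRequestMetrics(md *MetricData) error {
	s, err := p.ser.Serialize(md)
	if err != nil {
		return err
	}
	fmt.Println(s)
	return nil
}

func main() {
	var pub MetricPublisher = stdoutPublisher{ser: DefaultSerializer{}}
	_ = pub.PostRequestMetrics(&MetricData{OperationName: "GetObject", RetryCount: 1})
}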
-func (DefaultSerializer) Serialize(obj interface{}) (string, error) { - bytes, err := json.Marshal(obj) - if err != nil { - return "", err - } - return string(bytes), nil -} - -type metricContextKey struct{} - -// MetricContext contains fields to store metric-related information. -type MetricContext struct { - connectionCounter *SharedConnectionCounter - publisher MetricPublisher - data *MetricData -} - -// MetricData stores the collected metric data. -type MetricData struct { - RequestStartTime time.Time - RequestEndTime time.Time - APICallDuration time.Duration - SerializeStartTime time.Time - SerializeEndTime time.Time - MarshallingDuration time.Duration - ResolveEndpointStartTime time.Time - ResolveEndpointEndTime time.Time - EndpointResolutionDuration time.Duration - InThroughput float64 - OutThroughput float64 - RetryCount int - Success uint8 - StatusCode int - ClientRequestID string - ServiceID string - OperationName string - PartitionID string - Region string - RequestContentLength int64 - Stream StreamMetrics - Attempts []AttemptMetrics -} - -// StreamMetrics stores metrics related to streaming data. -type StreamMetrics struct { - ReadDuration time.Duration - ReadBytes int64 - Throughput float64 -} - -// AttemptMetrics stores metrics related to individual attempts. -type AttemptMetrics struct { - ServiceCallStart time.Time - ServiceCallEnd time.Time - ServiceCallDuration time.Duration - FirstByteTime time.Time - TimeToFirstByte time.Duration - ConnRequestedTime time.Time - ConnObtainedTime time.Time - ConcurrencyAcquireDuration time.Duration - CredentialFetchStartTime time.Time - CredentialFetchEndTime time.Time - SignStartTime time.Time - SignEndTime time.Time - SigningDuration time.Duration - DeserializeStartTime time.Time - DeserializeEndTime time.Time - UnMarshallingDuration time.Duration - RetryDelay time.Duration - ResponseContentLength int64 - StatusCode int - RequestID string - ExtendedRequestID string - HTTPClient string - MaxConcurrency int - PendingConnectionAcquires int - AvailableConcurrency int - ActiveRequests int - ReusedConnection bool -} - -// Data returns the MetricData associated with the MetricContext. -func (mc *MetricContext) Data() *MetricData { - return mc.data -} - -// ConnectionCounter returns the SharedConnectionCounter associated with the MetricContext. -func (mc *MetricContext) ConnectionCounter() *SharedConnectionCounter { - return mc.connectionCounter -} - -// Publisher returns the MetricPublisher associated with the MetricContext. -func (mc *MetricContext) Publisher() MetricPublisher { - return mc.publisher -} - -// ComputeRequestMetrics calculates and populates derived metrics based on the collected data. 
-func (md *MetricData) ComputeRequestMetrics() { - - for idx := range md.Attempts { - attempt := &md.Attempts[idx] - attempt.ConcurrencyAcquireDuration = attempt.ConnObtainedTime.Sub(attempt.ConnRequestedTime) - attempt.SigningDuration = attempt.SignEndTime.Sub(attempt.SignStartTime) - attempt.UnMarshallingDuration = attempt.DeserializeEndTime.Sub(attempt.DeserializeStartTime) - attempt.TimeToFirstByte = attempt.FirstByteTime.Sub(attempt.ServiceCallStart) - attempt.ServiceCallDuration = attempt.ServiceCallEnd.Sub(attempt.ServiceCallStart) - } - - md.APICallDuration = md.RequestEndTime.Sub(md.RequestStartTime) - md.MarshallingDuration = md.SerializeEndTime.Sub(md.SerializeStartTime) - md.EndpointResolutionDuration = md.ResolveEndpointEndTime.Sub(md.ResolveEndpointStartTime) - - md.RetryCount = len(md.Attempts) - 1 - - latestAttempt, err := md.LatestAttempt() - - if err != nil { - fmt.Printf("error retrieving attempts data due to: %s. Skipping Throughput metrics", err.Error()) - } else { - - md.StatusCode = latestAttempt.StatusCode - - if md.Success == 1 { - if latestAttempt.ResponseContentLength > 0 && latestAttempt.ServiceCallDuration > 0 { - md.InThroughput = float64(latestAttempt.ResponseContentLength) / latestAttempt.ServiceCallDuration.Seconds() - } - if md.RequestContentLength > 0 && latestAttempt.ServiceCallDuration > 0 { - md.OutThroughput = float64(md.RequestContentLength) / latestAttempt.ServiceCallDuration.Seconds() - } - } - } -} - -// LatestAttempt returns the latest attempt metrics. -// It returns an error if no attempts are initialized. -func (md *MetricData) LatestAttempt() (*AttemptMetrics, error) { - if md.Attempts == nil || len(md.Attempts) == 0 { - return nil, fmt.Errorf("no attempts initialized. NewAttempt() should be called first") - } - return &md.Attempts[len(md.Attempts)-1], nil -} - -// NewAttempt initializes new attempt metrics. -func (md *MetricData) NewAttempt() { - if md.Attempts == nil { - md.Attempts = []AttemptMetrics{} - } - md.Attempts = append(md.Attempts, AttemptMetrics{}) -} - -// SharedConnectionCounter is a counter shared across API calls. -type SharedConnectionCounter struct { - mu sync.Mutex - - activeRequests int - pendingConnectionAcquire int -} - -// ActiveRequests returns the count of active requests. -func (cc *SharedConnectionCounter) ActiveRequests() int { - cc.mu.Lock() - defer cc.mu.Unlock() - - return cc.activeRequests -} - -// PendingConnectionAcquire returns the count of pending connection acquires. -func (cc *SharedConnectionCounter) PendingConnectionAcquire() int { - cc.mu.Lock() - defer cc.mu.Unlock() - - return cc.pendingConnectionAcquire -} - -// AddActiveRequest increments the count of active requests. -func (cc *SharedConnectionCounter) AddActiveRequest() { - cc.mu.Lock() - defer cc.mu.Unlock() - - cc.activeRequests++ -} - -// RemoveActiveRequest decrements the count of active requests. -func (cc *SharedConnectionCounter) RemoveActiveRequest() { - cc.mu.Lock() - defer cc.mu.Unlock() - - cc.activeRequests-- -} - -// AddPendingConnectionAcquire increments the count of pending connection acquires. -func (cc *SharedConnectionCounter) AddPendingConnectionAcquire() { - cc.mu.Lock() - defer cc.mu.Unlock() - - cc.pendingConnectionAcquire++ -} - -// RemovePendingConnectionAcquire decrements the count of pending connection acquires. 
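The throughput fields computed above are plain bytes-per-second ratios guarded by the success and non-zero checks. The arithmetic, worked with hypothetical numbers:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical attempt: a 5 MiB response body read in 250 ms.
	const responseContentLength = 5 << 20
	serviceCallDuration := 250 * time.Millisecond

	// InThroughput as ComputeRequestMetrics derives it: bytes / seconds.
	inThroughput := float64(responseContentLength) / serviceCallDuration.Seconds()
	fmt.Printf("%.0f bytes/s (~%.0f MiB/s)\n", inThroughput, inThroughput/(1<<20))
}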
-func (cc *SharedConnectionCounter) RemovePendingConnectionAcquire() { - cc.mu.Lock() - defer cc.mu.Unlock() - - cc.pendingConnectionAcquire-- -} - -// InitMetricContext initializes the metric context with the provided counter and publisher. -// It returns the updated context. -func InitMetricContext( - ctx context.Context, counter *SharedConnectionCounter, publisher MetricPublisher, -) context.Context { - if middleware.GetStackValue(ctx, metricContextKey{}) == nil { - ctx = middleware.WithStackValue(ctx, metricContextKey{}, &MetricContext{ - connectionCounter: counter, - publisher: publisher, - data: &MetricData{ - Attempts: []AttemptMetrics{}, - Stream: StreamMetrics{}, - }, - }) - } - return ctx -} - -// Context returns the metric context from the given context. -// It returns nil if the metric context is not found. -func Context(ctx context.Context) *MetricContext { - mctx := middleware.GetStackValue(ctx, metricContextKey{}) - if mctx == nil { - return nil - } - return mctx.(*MetricContext) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go deleted file mode 100644 index 3f6aaf231e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go +++ /dev/null @@ -1,94 +0,0 @@ -package middleware - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "os" -) - -const envAwsLambdaFunctionName = "AWS_LAMBDA_FUNCTION_NAME" -const envAmznTraceID = "_X_AMZN_TRACE_ID" -const amznTraceIDHeader = "X-Amzn-Trace-Id" - -// AddRecursionDetection adds recursionDetection to the middleware stack -func AddRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&RecursionDetection{}, middleware.After) -} - -// RecursionDetection detects Lambda environment and sets its X-Ray trace ID to request header if absent -// to avoid recursion invocation in Lambda -type RecursionDetection struct{} - -// ID returns the middleware identifier -func (m *RecursionDetection) ID() string { - return "RecursionDetection" -} - -// HandleBuild detects Lambda environment and adds its trace ID to request header if absent -func (m *RecursionDetection) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - _, hasLambdaEnv := os.LookupEnv(envAwsLambdaFunctionName) - xAmznTraceID, hasTraceID := os.LookupEnv(envAmznTraceID) - value := req.Header.Get(amznTraceIDHeader) - // only set the X-Amzn-Trace-Id header when it is not set initially, the - // current environment is Lambda and the _X_AMZN_TRACE_ID env variable exists - if value != "" || !hasLambdaEnv || !hasTraceID { - return next.HandleBuild(ctx, in) - } - - req.Header.Set(amznTraceIDHeader, percentEncode(xAmznTraceID)) - return next.HandleBuild(ctx, in) -} - -func percentEncode(s string) string { - upperhex := "0123456789ABCDEF" - hexCount := 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEncode(c) { - hexCount++ - } - } - - if hexCount == 0 { - return s - } - - required := len(s) + 2*hexCount - t := make([]byte, required) - j := 0 - for i := 0; i < len(s); i++ { - if c := s[i]; shouldEncode(c) { - t[j] = '%' - t[j+1] = upperhex[c>>4] - t[j+2] = 
upperhex[c&15] - j += 3 - } else { - t[j] = c - j++ - } - } - return string(t) -} - -func shouldEncode(c byte) bool { - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { - return false - } - switch c { - case '-', '=', ';', ':', '+', '&', '[', ']', '{', '}', '"', '\'', ',': - return false - default: - return true - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go deleted file mode 100644 index dd3391fe41..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go +++ /dev/null @@ -1,27 +0,0 @@ -package middleware - -import ( - "github.com/aws/smithy-go/middleware" -) - -// requestIDKey is used to retrieve request id from response metadata -type requestIDKey struct{} - -// SetRequestIDMetadata sets the provided request id over middleware metadata -func SetRequestIDMetadata(metadata *middleware.Metadata, id string) { - metadata.Set(requestIDKey{}, id) -} - -// GetRequestIDMetadata retrieves the request id from middleware metadata -// returns string and bool indicating value of request id, whether request id was set. -func GetRequestIDMetadata(metadata middleware.Metadata) (string, bool) { - if !metadata.Has(requestIDKey{}) { - return "", false - } - - v, ok := metadata.Get(requestIDKey{}).(string) - if !ok { - return "", true - } - return v, true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go deleted file mode 100644 index e7d268c3da..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go +++ /dev/null @@ -1,53 +0,0 @@ -package middleware - -import ( - "context" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// AddRequestIDRetrieverMiddleware adds request id retriever middleware -func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - // add error wrapper middleware before operation deserializers so that it can wrap the error response - // returned by operation deserializers - return stack.Deserialize.Insert(&RequestIDRetriever{}, "OperationDeserializer", middleware.Before) -} - -// RequestIDRetriever middleware captures the AWS service request ID from the -// raw response. -type RequestIDRetriever struct { -} - -// ID returns the middleware identifier -func (m *RequestIDRetriever) ID() string { - return "RequestIDRetriever" -} - -// HandleDeserialize pulls the AWS request ID from the response, storing it in -// operation metadata. -func (m *RequestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - // No raw response to wrap with. - return out, metadata, err - } - - // Different header which can map to request id - requestIDHeaderList := []string{"X-Amzn-Requestid", "X-Amz-RequestId"} - - for _, h := range requestIDHeaderList { - // check for headers known to contain Request id - if v := resp.Header.Get(h); len(v) != 0 { - // set reqID on metadata for successful responses. 
- SetRequestIDMetadata(&metadata, v) - break - } - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go deleted file mode 100644 index db7cda42d9..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ /dev/null @@ -1,261 +0,0 @@ -package middleware - -import ( - "context" - "fmt" - "os" - "runtime" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -var languageVersion = strings.TrimPrefix(runtime.Version(), "go") - -// SDKAgentKeyType is the metadata type to add to the SDK agent string -type SDKAgentKeyType int - -// The set of valid SDKAgentKeyType constants. If an unknown value is assigned for SDKAgentKeyType it will -// be mapped to AdditionalMetadata. -const ( - _ SDKAgentKeyType = iota - APIMetadata - OperatingSystemMetadata - LanguageMetadata - EnvironmentMetadata - FeatureMetadata - ConfigMetadata - FrameworkMetadata - AdditionalMetadata - ApplicationIdentifier -) - -func (k SDKAgentKeyType) string() string { - switch k { - case APIMetadata: - return "api" - case OperatingSystemMetadata: - return "os" - case LanguageMetadata: - return "lang" - case EnvironmentMetadata: - return "exec-env" - case FeatureMetadata: - return "ft" - case ConfigMetadata: - return "cfg" - case FrameworkMetadata: - return "lib" - case ApplicationIdentifier: - return "app" - case AdditionalMetadata: - fallthrough - default: - return "md" - } -} - -const execEnvVar = `AWS_EXECUTION_ENV` - -var validChars = map[rune]bool{ - '!': true, '#': true, '$': true, '%': true, '&': true, '\'': true, '*': true, '+': true, - '-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true, -} - -// RequestUserAgent is a build middleware that set the User-Agent for the request. -type RequestUserAgent struct { - sdkAgent, userAgent *smithyhttp.UserAgentBuilder -} - -// NewRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the -// request. -// -// User-Agent example: -// -// aws-sdk-go-v2/1.2.3 -// -// X-Amz-User-Agent example: -// -// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15 -func NewRequestUserAgent() *RequestUserAgent { - userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder() - addProductName(userAgent) - addProductName(sdkAgent) - - r := &RequestUserAgent{ - sdkAgent: sdkAgent, - userAgent: userAgent, - } - - addSDKMetadata(r) - - return r -} - -func addSDKMetadata(r *RequestUserAgent) { - r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName()) - r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion) - r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS) - r.AddSDKAgentKeyValue(AdditionalMetadata, "GOARCH", runtime.GOARCH) - if ev := os.Getenv(execEnvVar); len(ev) > 0 { - r.AddSDKAgentKey(EnvironmentMetadata, ev) - } -} - -func addProductName(builder *smithyhttp.UserAgentBuilder) { - builder.AddKeyValue(aws.SDKName, aws.SDKVersion) -} - -// AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. 
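The user-agent middleware builds its header values with smithy-go's UserAgentBuilder. A rough sketch of the format it assembles, using placeholder version strings rather than the real SDK constants:

```go
package main

import (
	"fmt"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	// Approximates what NewRequestUserAgent produces; the versions here
	// are illustrative placeholders.
	b := smithyhttp.NewUserAgentBuilder()
	b.AddKeyValue("aws-sdk-go-v2", "1.26.1")
	b.AddKey("os/linux")
	b.AddKeyValue("lang/go", "1.21")
	fmt.Println(b.Build())
	// e.g. aws-sdk-go-v2/1.26.1 os/linux lang/go/1.21
}
```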
-func AddUserAgentKey(key string) func(*middleware.Stack) error { - return func(stack *middleware.Stack) error { - requestUserAgent, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - requestUserAgent.AddUserAgentKey(key) - return nil - } -} - -// AddUserAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. -func AddUserAgentKeyValue(key, value string) func(*middleware.Stack) error { - return func(stack *middleware.Stack) error { - requestUserAgent, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - requestUserAgent.AddUserAgentKeyValue(key, value) - return nil - } -} - -// AddSDKAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. -func AddSDKAgentKey(keyType SDKAgentKeyType, key string) func(*middleware.Stack) error { - return func(stack *middleware.Stack) error { - requestUserAgent, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - requestUserAgent.AddSDKAgentKey(keyType, key) - return nil - } -} - -// AddSDKAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. -func AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) func(*middleware.Stack) error { - return func(stack *middleware.Stack) error { - requestUserAgent, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - requestUserAgent.AddSDKAgentKeyValue(keyType, key, value) - return nil - } -} - -// AddRequestUserAgentMiddleware registers a requestUserAgent middleware on the stack if not present. -func AddRequestUserAgentMiddleware(stack *middleware.Stack) error { - _, err := getOrAddRequestUserAgent(stack) - return err -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*RequestUserAgent, error) { - id := (*RequestUserAgent)(nil).ID() - bm, ok := stack.Build.Get(id) - if !ok { - bm = NewRequestUserAgent() - err := stack.Build.Add(bm, middleware.After) - if err != nil { - return nil, err - } - } - - requestUserAgent, ok := bm.(*RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id) - } - - return requestUserAgent, nil -} - -// AddUserAgentKey adds the component identified by name to the User-Agent string. -func (u *RequestUserAgent) AddUserAgentKey(key string) { - u.userAgent.AddKey(strings.Map(rules, key)) -} - -// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string. -func (u *RequestUserAgent) AddUserAgentKeyValue(key, value string) { - u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value)) -} - -// AddSDKAgentKey adds the component identified by name to the User-Agent string. -func (u *RequestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) { - // TODO: should target sdkAgent - u.userAgent.AddKey(keyType.string() + "/" + strings.Map(rules, key)) -} - -// AddSDKAgentKeyValue adds the key identified by the given name and value to the User-Agent string. -func (u *RequestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) { - // TODO: should target sdkAgent - u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value)) -} - -// ID the name of the middleware. -func (u *RequestUserAgent) ID() string { - return "UserAgent" -} - -// HandleBuild adds or appends the constructed user agent to the request. 
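These Add* helpers are the public hooks for callers: each returns a stack mutator that can be appended to a config's APIOptions. A hedged sketch of tagging every request with a custom key/value (the key and value shown are placeholders):

```go
package main

import (
	"context"
	"log"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// Every request made with cfg now carries "app/ko" (placeholder
	// name/value) in its User-Agent header.
	cfg.APIOptions = append(cfg.APIOptions,
		awsmiddleware.AddUserAgentKeyValue("app", "ko"))
}
```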
-func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - switch req := in.Request.(type) { - case *smithyhttp.Request: - u.addHTTPUserAgent(req) - // TODO: To be re-enabled - // u.addHTTPSDKAgent(req) - default: - return out, metadata, fmt.Errorf("unknown transport type %T", in) - } - - return next.HandleBuild(ctx, in) -} - -func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { - const userAgent = "User-Agent" - updateHTTPHeader(request, userAgent, u.userAgent.Build()) -} - -func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { - const sdkAgent = "X-Amz-User-Agent" - updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build()) -} - -func updateHTTPHeader(request *smithyhttp.Request, header string, value string) { - var current string - if v := request.Header[header]; len(v) > 0 { - current = v[0] - } - if len(current) > 0 { - current = value + " " + current - } else { - current = value - } - request.Header[header] = append(request.Header[header][:0], current) -} - -func rules(r rune) rune { - switch { - case r >= '0' && r <= '9': - return r - case r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z': - return r - case validChars[r]: - return r - default: - return '-' - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go deleted file mode 100644 index 47ebc0f547..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go +++ /dev/null @@ -1,72 +0,0 @@ -package query - -import ( - "fmt" - "net/url" -) - -// Array represents the encoding of Query lists and sets. A Query array is a -// representation of a list of values of a fixed type. A serialized array might -// look like the following: -// -// ListName.member.1=foo -// &ListName.member.2=bar -// &Listname.member.3=baz -type Array struct { - // The query values to add the array to. - values url.Values - // The array's prefix, which includes the names of all parent structures - // and ends with the name of the list. For example, the prefix might be - // "ParentStructure.ListName". This prefix will be used to form the full - // keys for each element in the list. For example, an entry might have the - // key "ParentStructure.ListName.member.MemberName.1". - // - // While this is currently represented as a string that gets added to, it - // could also be represented as a stack that only gets condensed into a - // string when a finalized key is created. This could potentially reduce - // allocations. - prefix string - // Whether the list is flat or not. A list that is not flat will produce the - // following entry to the url.Values for a given entry: - // ListName.MemberName.1=value - // A list that is flat will produce the following: - // ListName.1=value - flat bool - // The location name of the member. In most cases this should be "member". - memberName string - // Elements are stored in values, so we keep track of the list size here. 
- size int32 - // Empty lists are encoded as "=", if we add a value later we will - // remove this encoding - emptyValue Value -} - -func newArray(values url.Values, prefix string, flat bool, memberName string) *Array { - emptyValue := newValue(values, prefix, flat) - emptyValue.String("") - - return &Array{ - values: values, - prefix: prefix, - flat: flat, - memberName: memberName, - emptyValue: emptyValue, - } -} - -// Value adds a new element to the Query Array. Returns a Value type used to -// encode the array element. -func (a *Array) Value() Value { - if a.size == 0 { - delete(a.values, a.emptyValue.key) - } - - // Query lists start a 1, so adjust the size first - a.size++ - prefix := a.prefix - if !a.flat { - prefix = fmt.Sprintf("%s.%s", prefix, a.memberName) - } - // Lists can't have flat members - return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go deleted file mode 100644 index 2ecf9241cd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go +++ /dev/null @@ -1,80 +0,0 @@ -package query - -import ( - "io" - "net/url" - "sort" -) - -// Encoder is a Query encoder that supports construction of Query body -// values using methods. -type Encoder struct { - // The query values that will be built up to manage encoding. - values url.Values - // The writer that the encoded body will be written to. - writer io.Writer - Value -} - -// NewEncoder returns a new Query body encoder -func NewEncoder(writer io.Writer) *Encoder { - values := url.Values{} - return &Encoder{ - values: values, - writer: writer, - Value: newBaseValue(values), - } -} - -// Encode returns the []byte slice representing the current -// state of the Query encoder. -func (e Encoder) Encode() error { - ws, ok := e.writer.(interface{ WriteString(string) (int, error) }) - if !ok { - // Fall back to less optimal byte slice casting if WriteString isn't available. - ws = &wrapWriteString{writer: e.writer} - } - - // Get the keys and sort them to have a stable output - keys := make([]string, 0, len(e.values)) - for k := range e.values { - keys = append(keys, k) - } - sort.Strings(keys) - isFirstEntry := true - for _, key := range keys { - queryValues := e.values[key] - escapedKey := url.QueryEscape(key) - for _, value := range queryValues { - if !isFirstEntry { - if _, err := ws.WriteString(`&`); err != nil { - return err - } - } else { - isFirstEntry = false - } - if _, err := ws.WriteString(escapedKey); err != nil { - return err - } - if _, err := ws.WriteString(`=`); err != nil { - return err - } - if _, err := ws.WriteString(url.QueryEscape(value)); err != nil { - return err - } - } - } - return nil -} - -// wrapWriteString wraps an io.Writer to provide a WriteString method -// where one is not available. -type wrapWriteString struct { - writer io.Writer -} - -// WriteString writes a string to the wrapped writer by casting it to -// a byte array first. 
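Encode sorts keys so the body is deterministic. Since the query package is internal to the SDK and cannot be imported, this sketch rebuilds the idea with net/url directly, showing how a non-flat list named Tags (an assumed name) serializes:

```go
package main

import (
	"fmt"
	"net/url"
	"sort"
	"strings"
)

func main() {
	values := url.Values{}
	// Equivalent of Array entries for a non-flat list: prefix "Tags",
	// member name "member", 1-indexed.
	for i, v := range []string{"foo", "bar"} {
		values.Set(fmt.Sprintf("Tags.member.%d", i+1), v)
	}

	// Mirror Encoder.Encode: sort the keys, then emit key=value pairs.
	keys := make([]string, 0, len(values))
	for k := range values {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	var parts []string
	for _, k := range keys {
		parts = append(parts, url.QueryEscape(k)+"="+url.QueryEscape(values.Get(k)))
	}
	fmt.Println(strings.Join(parts, "&"))
	// Tags.member.1=foo&Tags.member.2=bar
}
```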
-func (w wrapWriteString) WriteString(v string) (int, error) { - return w.writer.Write([]byte(v)) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go deleted file mode 100644 index dea242b8b6..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go +++ /dev/null @@ -1,78 +0,0 @@ -package query - -import ( - "fmt" - "net/url" -) - -// Map represents the encoding of Query maps. A Query map is a representation -// of a mapping of arbitrary string keys to arbitrary values of a fixed type. -// A Map differs from an Object in that the set of keys is not fixed, in that -// the values must all be of the same type, and that map entries are ordered. -// A serialized map might look like the following: -// -// MapName.entry.1.key=Foo -// &MapName.entry.1.value=spam -// &MapName.entry.2.key=Bar -// &MapName.entry.2.value=eggs -type Map struct { - // The query values to add the map to. - values url.Values - // The map's prefix, which includes the names of all parent structures - // and ends with the name of the object. For example, the prefix might be - // "ParentStructure.MapName". This prefix will be used to form the full - // keys for each key-value pair of the map. For example, a value might have - // the key "ParentStructure.MapName.1.value". - // - // While this is currently represented as a string that gets added to, it - // could also be represented as a stack that only gets condensed into a - // string when a finalized key is created. This could potentially reduce - // allocations. - prefix string - // Whether the map is flat or not. A map that is not flat will produce the - // following entries to the url.Values for a given key-value pair: - // MapName.entry.1.KeyLocationName=mykey - // MapName.entry.1.ValueLocationName=myvalue - // A map that is flat will produce the following: - // MapName.1.KeyLocationName=mykey - // MapName.1.ValueLocationName=myvalue - flat bool - // The location name of the key. In most cases this should be "key". - keyLocationName string - // The location name of the value. In most cases this should be "value". - valueLocationName string - // Elements are stored in values, so we keep track of the list size here. - size int32 -} - -func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map { - return &Map{ - values: values, - prefix: prefix, - flat: flat, - keyLocationName: keyLocationName, - valueLocationName: valueLocationName, - } -} - -// Key adds the given named key to the Query map. -// Returns a Value encoder that should be used to encode a Query value type. 
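For context on the entry layout the Key method below produces, a standalone sketch of a non-flat map serialization; the prefix and map contents are invented, and unlike the real encoder this relies on Go's map iteration order, so it is only deterministic for a single entry:

```go
package main

import (
	"fmt"
	"net/url"
)

// encodeQueryMap mirrors the deleted Map encoder's key layout for a non-flat
// map with the conventional "key"/"value" location names.
func encodeQueryMap(values url.Values, prefix string, m map[string]string) {
	i := 0
	for k, v := range m {
		i++ // Query maps are 1-indexed, like lists.
		values.Set(fmt.Sprintf("%s.entry.%d.key", prefix, i), k)
		values.Set(fmt.Sprintf("%s.entry.%d.value", prefix, i), v)
	}
}

func main() {
	values := url.Values{}
	encodeQueryMap(values, "Attributes", map[string]string{"Foo": "spam"})
	fmt.Println(values.Encode())
	// Attributes.entry.1.key=Foo&Attributes.entry.1.value=spam
}
```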
-func (m *Map) Key(name string) Value { - // Query lists start a 1, so adjust the size first - m.size++ - var key string - var value string - if m.flat { - key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName) - value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName) - } else { - key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName) - value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName) - } - - // The key can only be a string, so we just go ahead and set it here - newValue(m.values, key, false).String(name) - - // Maps can't have flat members - return newValue(m.values, value, false) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go deleted file mode 100644 index 3603447911..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go +++ /dev/null @@ -1,62 +0,0 @@ -package query - -import ( - "context" - "fmt" - "io/ioutil" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the -// operation serializer that will convert the query request body to a GET -// operation with the query message in the HTTP request querystring. -func AddAsGetRequestMiddleware(stack *middleware.Stack) error { - return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After) -} - -type asGetRequest struct{} - -func (*asGetRequest) ID() string { return "Query:AsGetRequest" } - -func (m *asGetRequest) HandleSerialize( - ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - req, ok := input.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request) - } - - req.Method = "GET" - - // If the stream is not set, nothing else to do. - stream := req.GetStream() - if stream == nil { - return next.HandleSerialize(ctx, input) - } - - // Clear the stream since there will not be any body. - req.Header.Del("Content-Type") - req, err = req.SetStream(nil) - if err != nil { - return out, metadata, fmt.Errorf("unable update request body %w", err) - } - input.Request = req - - // Update request query with the body's query string value. - delim := "" - if len(req.URL.RawQuery) != 0 { - delim = "&" - } - - b, err := ioutil.ReadAll(stream) - if err != nil { - return out, metadata, fmt.Errorf("unable to get request body %w", err) - } - req.URL.RawQuery += delim + string(b) - - return next.HandleSerialize(ctx, input) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go deleted file mode 100644 index 455b92515c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go +++ /dev/null @@ -1,69 +0,0 @@ -package query - -import ( - "fmt" - "net/url" -) - -// Object represents the encoding of Query structures and unions. A Query -// object is a representation of a mapping of string keys to arbitrary -// values where there is a fixed set of keys whose values each have their -// own known type. 
A serialized object might look like the following: -// -// ObjectName.Foo=value -// &ObjectName.Bar=5 -type Object struct { - // The query values to add the object to. - values url.Values - // The object's prefix, which includes the names of all parent structures - // and ends with the name of the object. For example, the prefix might be - // "ParentStructure.ObjectName". This prefix will be used to form the full - // keys for each member of the object. For example, a member might have the - // key "ParentStructure.ObjectName.MemberName". - // - // While this is currently represented as a string that gets added to, it - // could also be represented as a stack that only gets condensed into a - // string when a finalized key is created. This could potentially reduce - // allocations. - prefix string -} - -func newObject(values url.Values, prefix string) *Object { - return &Object{ - values: values, - prefix: prefix, - } -} - -// Key adds the given named key to the Query object. -// Returns a Value encoder that should be used to encode a Query value type. -func (o *Object) Key(name string) Value { - return o.key(name, false) -} - -// KeyWithValues adds the given named key to the Query object. -// Returns a Value encoder that should be used to encode a Query list of values. -func (o *Object) KeyWithValues(name string) Value { - return o.keyWithValues(name, false) -} - -// FlatKey adds the given named key to the Query object. -// Returns a Value encoder that should be used to encode a Query value type. The -// value will be flattened if it is a map or array. -func (o *Object) FlatKey(name string) Value { - return o.key(name, true) -} - -func (o *Object) key(name string, flatValue bool) Value { - if o.prefix != "" { - return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) - } - return newValue(o.values, name, flatValue) -} - -func (o *Object) keyWithValues(name string, flatValue bool) Value { - if o.prefix != "" { - return newAppendValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) - } - return newAppendValue(o.values, name, flatValue) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go deleted file mode 100644 index a9251521f1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go +++ /dev/null @@ -1,115 +0,0 @@ -package query - -import ( - "math/big" - "net/url" - - "github.com/aws/smithy-go/encoding/httpbinding" -) - -// Value represents a Query Value type. -type Value struct { - // The query values to add the value to. - values url.Values - // The value's key, which will form the prefix for complex types. - key string - // Whether the value should be flattened or not if it's a flattenable type. - flat bool - queryValue httpbinding.QueryValue -} - -func newValue(values url.Values, key string, flat bool) Value { - return Value{ - values: values, - key: key, - flat: flat, - queryValue: httpbinding.NewQueryValue(values, key, false), - } -} - -func newAppendValue(values url.Values, key string, flat bool) Value { - return Value{ - values: values, - key: key, - flat: flat, - queryValue: httpbinding.NewQueryValue(values, key, true), - } -} - -func newBaseValue(values url.Values) Value { - return Value{ - values: values, - queryValue: httpbinding.NewQueryValue(nil, "", false), - } -} - -// Array returns a new Array encoder. 
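A tiny sketch of how nested Object prefixes compose into full keys; the structure names are invented:

```go
package main

import "fmt"

// objectKey mirrors how the Object encoder forms full keys: each nested
// structure extends the prefix with its member name.
func objectKey(prefix, name string) string {
	if prefix != "" {
		return fmt.Sprintf("%s.%s", prefix, name)
	}
	return name
}

func main() {
	root := objectKey("", "ParentStructure")
	obj := objectKey(root, "ObjectName")
	fmt.Println(objectKey(obj, "Foo")) // ParentStructure.ObjectName.Foo
	fmt.Println(objectKey(obj, "Bar")) // ParentStructure.ObjectName.Bar
}
```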
-func (qv Value) Array(locationName string) *Array { - return newArray(qv.values, qv.key, qv.flat, locationName) -} - -// Object returns a new Object encoder. -func (qv Value) Object() *Object { - return newObject(qv.values, qv.key) -} - -// Map returns a new Map encoder. -func (qv Value) Map(keyLocationName string, valueLocationName string) *Map { - return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName) -} - -// Base64EncodeBytes encodes v as a base64 query string value. -// This is intended to enable compatibility with the JSON encoder. -func (qv Value) Base64EncodeBytes(v []byte) { - qv.queryValue.Blob(v) -} - -// Boolean encodes v as a query string value -func (qv Value) Boolean(v bool) { - qv.queryValue.Boolean(v) -} - -// String encodes v as a query string value -func (qv Value) String(v string) { - qv.queryValue.String(v) -} - -// Byte encodes v as a query string value -func (qv Value) Byte(v int8) { - qv.queryValue.Byte(v) -} - -// Short encodes v as a query string value -func (qv Value) Short(v int16) { - qv.queryValue.Short(v) -} - -// Integer encodes v as a query string value -func (qv Value) Integer(v int32) { - qv.queryValue.Integer(v) -} - -// Long encodes v as a query string value -func (qv Value) Long(v int64) { - qv.queryValue.Long(v) -} - -// Float encodes v as a query string value -func (qv Value) Float(v float32) { - qv.queryValue.Float(v) -} - -// Double encodes v as a query string value -func (qv Value) Double(v float64) { - qv.queryValue.Double(v) -} - -// BigInteger encodes v as a query string value -func (qv Value) BigInteger(v *big.Int) { - qv.queryValue.BigInteger(v) -} - -// BigDecimal encodes v as a query string value -func (qv Value) BigDecimal(v *big.Float) { - qv.queryValue.BigDecimal(v) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go deleted file mode 100644 index 1bce78a4d4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go +++ /dev/null @@ -1,85 +0,0 @@ -package restjson - -import ( - "encoding/json" - "io" - "strings" - - "github.com/aws/smithy-go" -) - -// GetErrorInfo util looks for code, __type, and message members in the -// json body. These members are optionally available, and the function -// returns the value of member if it is available. This function is useful to -// identify the error code, msg in a REST JSON error response. -func GetErrorInfo(decoder *json.Decoder) (errorType string, message string, err error) { - var errInfo struct { - Code string - Type string `json:"__type"` - Message string - } - - err = decoder.Decode(&errInfo) - if err != nil { - if err == io.EOF { - return errorType, message, nil - } - return errorType, message, err - } - - // assign error type - if len(errInfo.Code) != 0 { - errorType = errInfo.Code - } else if len(errInfo.Type) != 0 { - errorType = errInfo.Type - } - - // assign error message - if len(errInfo.Message) != 0 { - message = errInfo.Message - } - - // sanitize error - if len(errorType) != 0 { - errorType = SanitizeErrorCode(errorType) - } - - return errorType, message, nil -} - -// SanitizeErrorCode sanitizes the errorCode string . -// The rule for sanitizing is if a `:` character is present, then take only the -// contents before the first : character in the value. -// If a # character is present, then take only the contents after the -// first # character in the value. 
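A standalone sketch of those two sanitization rules (reimplemented with strings.Contains; the input is a representative, invented __type value):

```go
package main

import (
	"fmt"
	"strings"
)

// sanitizeErrorCode mirrors the deleted helper: keep what's before the
// first ':', then keep what's after the first '#'.
func sanitizeErrorCode(code string) string {
	if strings.Contains(code, ":") {
		code = strings.SplitN(code, ":", 2)[0]
	}
	if strings.Contains(code, "#") {
		code = strings.SplitN(code, "#", 2)[1]
	}
	return code
}

func main() {
	fmt.Println(sanitizeErrorCode("com.example#ResourceNotFoundException:details"))
	// ResourceNotFoundException
}
```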
-func SanitizeErrorCode(errorCode string) string { - if strings.ContainsAny(errorCode, ":") { - errorCode = strings.SplitN(errorCode, ":", 2)[0] - } - - if strings.ContainsAny(errorCode, "#") { - errorCode = strings.SplitN(errorCode, "#", 2)[1] - } - - return errorCode -} - -// GetSmithyGenericAPIError returns smithy generic api error and an error interface. -// Takes in json decoder, and error Code string as args. The function retrieves error message -// and error code from the decoder body. If errorCode of length greater than 0 is passed in as -// an argument, it is used instead. -func GetSmithyGenericAPIError(decoder *json.Decoder, errorCode string) (*smithy.GenericAPIError, error) { - errorType, message, err := GetErrorInfo(decoder) - if err != nil { - return nil, err - } - - if len(errorCode) == 0 { - errorCode = errorType - } - - return &smithy.GenericAPIError{ - Code: errorCode, - Message: message, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go deleted file mode 100644 index 6975ce6524..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go +++ /dev/null @@ -1,48 +0,0 @@ -package xml - -import ( - "encoding/xml" - "fmt" - "io" -) - -// ErrorComponents represents the error response fields -// that will be deserialized from an xml error response body -type ErrorComponents struct { - Code string - Message string - RequestID string -} - -// GetErrorResponseComponents returns the error fields from an xml error response body -func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) { - if noErrorWrapping { - var errResponse noWrappedErrorResponse - if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) - } - return ErrorComponents(errResponse), nil - } - - var errResponse wrappedErrorResponse - if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) - } - return ErrorComponents(errResponse), nil -} - -// noWrappedErrorResponse represents the error response body with -// no internal Error wrapping -type noWrappedErrorResponse struct { - Code string `xml:"Code"` - Message string `xml:"Message"` - RequestID string `xml:"RequestId"` -} - -// wrappedErrorResponse represents the error response body -// wrapped within Error -type wrappedErrorResponse struct { - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go deleted file mode 100644 index 8c78364105..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go +++ /dev/null @@ -1,20 +0,0 @@ -package ratelimit - -import "context" - -// None implements a no-op rate limiter which effectively disables client-side -// rate limiting (also known as "retry quotas"). -// -// GetToken does nothing and always returns a nil error. The returned -// token-release function does nothing, and always returns a nil error. -// -// AddTokens does nothing and always returns a nil error. 
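Since the xml protocol package is likewise internal, here is a sketch that applies the same wrapped shape with encoding/xml directly; the error body is a fabricated example:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// Mirrors wrappedErrorResponse: Code and Message nested under <Error>.
type wrappedError struct {
	Code      string `xml:"Error>Code"`
	Message   string `xml:"Error>Message"`
	RequestID string `xml:"RequestId"`
}

func main() {
	body := `<ErrorResponse>
  <Error><Code>Throttling</Code><Message>Rate exceeded</Message></Error>
  <RequestId>abc-123</RequestId>
</ErrorResponse>`

	var e wrappedError
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&e); err != nil {
		panic(err)
	}
	fmt.Println(e.Code, e.Message, e.RequestID)
	// Throttling Rate exceeded abc-123
}
```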
-var None = &none{} - -type none struct{} - -func (*none) GetToken(ctx context.Context, cost uint) (func() error, error) { - return func() error { return nil }, nil -} - -func (*none) AddTokens(v uint) error { return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go deleted file mode 100644 index 974ef594f0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go +++ /dev/null @@ -1,96 +0,0 @@ -package ratelimit - -import ( - "sync" -) - -// TokenBucket provides a concurrency safe utility for adding and removing -// tokens from the available token bucket. -type TokenBucket struct { - remainingTokens uint - maxCapacity uint - minCapacity uint - mu sync.Mutex -} - -// NewTokenBucket returns an initialized TokenBucket with the capacity -// specified. -func NewTokenBucket(i uint) *TokenBucket { - return &TokenBucket{ - remainingTokens: i, - maxCapacity: i, - minCapacity: 1, - } -} - -// Retrieve attempts to reduce the available tokens by the amount requested. If -// there are tokens available true will be returned along with the number of -// available tokens remaining. If amount requested is larger than the available -// capacity, false will be returned along with the available capacity. If the -// amount is less than the available capacity, the capacity will be reduced by -// that amount, and the remaining capacity and true will be returned. -func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) { - t.mu.Lock() - defer t.mu.Unlock() - - if amount > t.remainingTokens { - return t.remainingTokens, false - } - - t.remainingTokens -= amount - return t.remainingTokens, true -} - -// Refund returns the amount of tokens back to the available token bucket, up -// to the initial capacity. -func (t *TokenBucket) Refund(amount uint) { - t.mu.Lock() - defer t.mu.Unlock() - - // Capacity cannot exceed max capacity. - t.remainingTokens = uintMin(t.remainingTokens+amount, t.maxCapacity) -} - -// Capacity returns the maximum capacity of tokens that the bucket could -// contain. -func (t *TokenBucket) Capacity() uint { - t.mu.Lock() - defer t.mu.Unlock() - - return t.maxCapacity -} - -// Remaining returns the number of tokens that remaining in the bucket. -func (t *TokenBucket) Remaining() uint { - t.mu.Lock() - defer t.mu.Unlock() - - return t.remainingTokens -} - -// Resize adjusts the size of the token bucket. Returns the capacity remaining. -func (t *TokenBucket) Resize(size uint) uint { - t.mu.Lock() - defer t.mu.Unlock() - - t.maxCapacity = uintMax(size, t.minCapacity) - - // Capacity needs to be capped at max capacity, if max size reduced. 
- t.remainingTokens = uintMin(t.remainingTokens, t.maxCapacity) - - return t.remainingTokens -} - -func uintMin(a, b uint) uint { - if a < b { - return a - } - return b -} - -func uintMax(a, b uint) uint { - if a > b { - return a - } - return b -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go deleted file mode 100644 index d89090ad38..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go +++ /dev/null @@ -1,83 +0,0 @@ -package ratelimit - -import ( - "context" - "fmt" -) - -type rateToken struct { - tokenCost uint - bucket *TokenBucket -} - -func (t rateToken) release() error { - t.bucket.Refund(t.tokenCost) - return nil -} - -// TokenRateLimit provides a Token Bucket RateLimiter implementation -// that limits the overall number of retry attempts that can be made across -// operation invocations. -type TokenRateLimit struct { - bucket *TokenBucket -} - -// NewTokenRateLimit returns an TokenRateLimit with default values. -// Functional options can configure the retry rate limiter. -func NewTokenRateLimit(tokens uint) *TokenRateLimit { - return &TokenRateLimit{ - bucket: NewTokenBucket(tokens), - } -} - -type canceledError struct { - Err error -} - -func (c canceledError) CanceledError() bool { return true } -func (c canceledError) Unwrap() error { return c.Err } -func (c canceledError) Error() string { - return fmt.Sprintf("canceled, %v", c.Err) -} - -// GetToken may cause a available pool of retry quota to be -// decremented. Will return an error if the decremented value can not be -// reduced from the retry quota. -func (l *TokenRateLimit) GetToken(ctx context.Context, cost uint) (func() error, error) { - select { - case <-ctx.Done(): - return nil, canceledError{Err: ctx.Err()} - default: - } - if avail, ok := l.bucket.Retrieve(cost); !ok { - return nil, QuotaExceededError{Available: avail, Requested: cost} - } - - return rateToken{ - tokenCost: cost, - bucket: l.bucket, - }.release, nil -} - -// AddTokens increments the token bucket by a fixed amount. -func (l *TokenRateLimit) AddTokens(v uint) error { - l.bucket.Refund(v) - return nil -} - -// Remaining returns the number of remaining tokens in the bucket. -func (l *TokenRateLimit) Remaining() uint { - return l.bucket.Remaining() -} - -// QuotaExceededError provides the SDK error when the retries for a given -// token bucket have been exhausted. -type QuotaExceededError struct { - Available uint - Requested uint -} - -func (e QuotaExceededError) Error() string { - return fmt.Sprintf("retry quota exceeded, %d available, %d requested", - e.Available, e.Requested) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go deleted file mode 100644 index d8d00e6158..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go +++ /dev/null @@ -1,25 +0,0 @@ -package aws - -import ( - "fmt" -) - -// TODO remove replace with smithy.CanceledError - -// RequestCanceledError is the error that will be returned by an API request -// that was canceled. Requests given a Context may return this error when -// canceled. -type RequestCanceledError struct { - Err error -} - -// CanceledError returns true to satisfy interfaces checking for canceled errors. -func (*RequestCanceledError) CanceledError() bool { return true } - -// Unwrap returns the underlying error, if there was one. 
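This token rate limit backs the standard retryer's client-side retry quota; it can be resized, or replaced with ratelimit.None from earlier in this diff, through StandardOptions. A sketch assuming default config loading (the bucket size shown is an arbitrary example):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRetryer(func() aws.Retryer {
			return retry.NewStandard(func(o *retry.StandardOptions) {
				// Enlarge the client-side retry quota; use
				// ratelimit.None instead to disable it entirely.
				o.RateLimiter = ratelimit.NewTokenRateLimit(1000)
			})
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}
```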
-func (e *RequestCanceledError) Unwrap() error { - return e.Err -} -func (e *RequestCanceledError) Error() string { - return fmt.Sprintf("request canceled, %v", e.Err) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go deleted file mode 100644 index 4dfde85737..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go +++ /dev/null @@ -1,156 +0,0 @@ -package retry - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -const ( - // DefaultRequestCost is the cost of a single request from the adaptive - // rate limited token bucket. - DefaultRequestCost uint = 1 -) - -// DefaultThrottles provides the set of errors considered throttle errors that -// are checked by default. -var DefaultThrottles = []IsErrorThrottle{ - ThrottleErrorCode{ - Codes: DefaultThrottleErrorCodes, - }, -} - -// AdaptiveModeOptions provides the functional options for configuring the -// adaptive retry mode, and delay behavior. -type AdaptiveModeOptions struct { - // If the adaptive token bucket is empty, when an attempt will be made - // AdaptiveMode will sleep until a token is available. This can occur when - // attempts fail with throttle errors. Use this option to disable the sleep - // until token is available, and return error immediately. - FailOnNoAttemptTokens bool - - // The cost of an attempt from the AdaptiveMode's adaptive token bucket. - RequestCost uint - - // Set of strategies to determine if the attempt failed due to a throttle - // error. - // - // It is safe to append to this list in NewAdaptiveMode's functional options. - Throttles []IsErrorThrottle - - // Set of options for standard retry mode that AdaptiveMode is built on top - // of. AdaptiveMode may apply its own defaults to Standard retry mode that - // are different than the defaults of NewStandard. Use these options to - // override the default options. - StandardOptions []func(*StandardOptions) -} - -// AdaptiveMode provides an experimental retry strategy that expands on the -// Standard retry strategy, adding client attempt rate limits. The attempt rate -// limit is initially unrestricted, but becomes restricted when the attempt -// fails with for a throttle error. When restricted AdaptiveMode may need to -// sleep before an attempt is made, if too many throttles have been received. -// AdaptiveMode's sleep can be canceled with context cancel. Set -// AdaptiveModeOptions FailOnNoAttemptTokens to change the behavior from sleep, -// to fail fast. -// -// Eventually unrestricted attempt rate limit will be restored once attempts no -// longer are failing due to throttle errors. -type AdaptiveMode struct { - options AdaptiveModeOptions - throttles IsErrorThrottles - - retryer aws.RetryerV2 - rateLimit *adaptiveRateLimit -} - -// NewAdaptiveMode returns an initialized AdaptiveMode retry strategy. -func NewAdaptiveMode(optFns ...func(*AdaptiveModeOptions)) *AdaptiveMode { - o := AdaptiveModeOptions{ - RequestCost: DefaultRequestCost, - Throttles: append([]IsErrorThrottle{}, DefaultThrottles...), - } - for _, fn := range optFns { - fn(&o) - } - - return &AdaptiveMode{ - options: o, - throttles: IsErrorThrottles(o.Throttles), - retryer: NewStandard(o.StandardOptions...), - rateLimit: newAdaptiveRateLimit(), - } -} - -// IsErrorRetryable returns if the failed attempt is retryable. 
This check -// should determine if the error can be retried, or if the error is -// terminal. -func (a *AdaptiveMode) IsErrorRetryable(err error) bool { - return a.retryer.IsErrorRetryable(err) -} - -// MaxAttempts returns the maximum number of attempts that can be made for -// an attempt before failing. A value of 0 implies that the attempt should -// be retried until it succeeds if the errors are retryable. -func (a *AdaptiveMode) MaxAttempts() int { - return a.retryer.MaxAttempts() -} - -// RetryDelay returns the delay that should be used before retrying the -// attempt. Will return an error if the delay could not be determined. -func (a *AdaptiveMode) RetryDelay(attempt int, opErr error) ( - time.Duration, error, -) { - return a.retryer.RetryDelay(attempt, opErr) -} - -// GetRetryToken attempts to deduct the retry cost from the retry token pool. -// Returns the token release function, or an error. -func (a *AdaptiveMode) GetRetryToken(ctx context.Context, opErr error) ( - releaseToken func(error) error, err error, -) { - return a.retryer.GetRetryToken(ctx, opErr) -} - -// GetInitialToken returns the initial attempt token that can increment the -// retry token pool if the attempt is successful. -// -// Deprecated: This method does not provide a way to block using Context, -// nor can it return an error. Use RetryerV2, and GetAttemptToken instead. Only -// present to implement Retryer interface. -func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) { - return nopRelease -} - -// GetAttemptToken returns the attempt token that can be used to rate limit -// attempt calls. Will be used by the SDK's retry package's Attempt -// middleware to get an attempt token prior to making the attempt and releasing -// the attempt token after the attempt has been made. 
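Adaptive mode is wired up like any custom retryer. A sketch of enabling it on a client, with the fail-fast option described above (config loading details left at the defaults):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRetryer(func() aws.Retryer {
			return retry.NewAdaptiveMode(func(o *retry.AdaptiveModeOptions) {
				// Fail fast instead of sleeping when the attempt
				// token bucket is empty.
				o.FailOnNoAttemptTokens = true
			})
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}
```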
-func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) { - for { - acquiredToken, waitTryAgain := a.rateLimit.AcquireToken(a.options.RequestCost) - if acquiredToken { - break - } - if a.options.FailOnNoAttemptTokens { - return nil, fmt.Errorf( - "unable to get attempt token, and FailOnNoAttemptTokens enables") - } - - if err := sdk.SleepWithContext(ctx, waitTryAgain); err != nil { - return nil, fmt.Errorf("failed to wait for token to be available, %w", err) - } - } - - return a.handleResponse, nil -} - -func (a *AdaptiveMode) handleResponse(opErr error) error { - throttled := a.throttles.IsErrorThrottle(opErr).Bool() - - a.rateLimit.Update(throttled) - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go deleted file mode 100644 index ad96d9b8c5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go +++ /dev/null @@ -1,158 +0,0 @@ -package retry - -import ( - "math" - "sync" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -type adaptiveRateLimit struct { - tokenBucketEnabled bool - - smooth float64 - beta float64 - scaleConstant float64 - minFillRate float64 - - fillRate float64 - calculatedRate float64 - lastRefilled time.Time - measuredTxRate float64 - lastTxRateBucket float64 - requestCount int64 - lastMaxRate float64 - lastThrottleTime time.Time - timeWindow float64 - - tokenBucket *adaptiveTokenBucket - - mu sync.Mutex -} - -func newAdaptiveRateLimit() *adaptiveRateLimit { - now := sdk.NowTime() - return &adaptiveRateLimit{ - smooth: 0.8, - beta: 0.7, - scaleConstant: 0.4, - - minFillRate: 0.5, - - lastTxRateBucket: math.Floor(timeFloat64Seconds(now)), - lastThrottleTime: now, - - tokenBucket: newAdaptiveTokenBucket(0), - } -} - -func (a *adaptiveRateLimit) Enable(v bool) { - a.mu.Lock() - defer a.mu.Unlock() - - a.tokenBucketEnabled = v -} - -func (a *adaptiveRateLimit) AcquireToken(amount uint) ( - tokenAcquired bool, waitTryAgain time.Duration, -) { - a.mu.Lock() - defer a.mu.Unlock() - - if !a.tokenBucketEnabled { - return true, 0 - } - - a.tokenBucketRefill() - - available, ok := a.tokenBucket.Retrieve(float64(amount)) - if !ok { - waitDur := float64Seconds((float64(amount) - available) / a.fillRate) - return false, waitDur - } - - return true, 0 -} - -func (a *adaptiveRateLimit) Update(throttled bool) { - a.mu.Lock() - defer a.mu.Unlock() - - a.updateMeasuredRate() - - if throttled { - rateToUse := a.measuredTxRate - if a.tokenBucketEnabled { - rateToUse = math.Min(a.measuredTxRate, a.fillRate) - } - - a.lastMaxRate = rateToUse - a.calculateTimeWindow() - a.lastThrottleTime = sdk.NowTime() - a.calculatedRate = a.cubicThrottle(rateToUse) - a.tokenBucketEnabled = true - } else { - a.calculateTimeWindow() - a.calculatedRate = a.cubicSuccess(sdk.NowTime()) - } - - newRate := math.Min(a.calculatedRate, 2*a.measuredTxRate) - a.tokenBucketUpdateRate(newRate) -} - -func (a *adaptiveRateLimit) cubicSuccess(t time.Time) float64 { - dt := secondsFloat64(t.Sub(a.lastThrottleTime)) - return (a.scaleConstant * math.Pow(dt-a.timeWindow, 3)) + a.lastMaxRate -} - -func (a *adaptiveRateLimit) cubicThrottle(rateToUse float64) float64 { - return rateToUse * a.beta -} - -func (a *adaptiveRateLimit) calculateTimeWindow() { - a.timeWindow = math.Pow((a.lastMaxRate*(1.-a.beta))/a.scaleConstant, 1./3.) 
-} - -func (a *adaptiveRateLimit) tokenBucketUpdateRate(newRPS float64) { - a.tokenBucketRefill() - a.fillRate = math.Max(newRPS, a.minFillRate) - a.tokenBucket.Resize(newRPS) -} - -func (a *adaptiveRateLimit) updateMeasuredRate() { - now := sdk.NowTime() - timeBucket := math.Floor(timeFloat64Seconds(now)*2.) / 2. - a.requestCount++ - - if timeBucket > a.lastTxRateBucket { - currentRate := float64(a.requestCount) / (timeBucket - a.lastTxRateBucket) - a.measuredTxRate = (currentRate * a.smooth) + (a.measuredTxRate * (1. - a.smooth)) - a.requestCount = 0 - a.lastTxRateBucket = timeBucket - } -} - -func (a *adaptiveRateLimit) tokenBucketRefill() { - now := sdk.NowTime() - if a.lastRefilled.IsZero() { - a.lastRefilled = now - return - } - - fillAmount := secondsFloat64(now.Sub(a.lastRefilled)) * a.fillRate - a.tokenBucket.Refund(fillAmount) - a.lastRefilled = now -} - -func float64Seconds(v float64) time.Duration { - return time.Duration(v * float64(time.Second)) -} - -func secondsFloat64(v time.Duration) float64 { - return float64(v) / float64(time.Second) -} - -func timeFloat64Seconds(v time.Time) float64 { - return float64(v.UnixNano()) / float64(time.Second) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go deleted file mode 100644 index 052723e8ed..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go +++ /dev/null @@ -1,83 +0,0 @@ -package retry - -import ( - "math" - "sync" -) - -// adaptiveTokenBucket provides a concurrency safe utility for adding and -// removing tokens from the available token bucket. -type adaptiveTokenBucket struct { - remainingTokens float64 - maxCapacity float64 - minCapacity float64 - mu sync.Mutex -} - -// newAdaptiveTokenBucket returns an initialized adaptiveTokenBucket with the -// capacity specified. -func newAdaptiveTokenBucket(i float64) *adaptiveTokenBucket { - return &adaptiveTokenBucket{ - remainingTokens: i, - maxCapacity: i, - minCapacity: 1, - } -} - -// Retrieve attempts to reduce the available tokens by the amount requested. If -// there are tokens available true will be returned along with the number of -// available tokens remaining. If amount requested is larger than the available -// capacity, false will be returned along with the available capacity. If the -// amount is less than the available capacity, the capacity will be reduced by -// that amount, and the remaining capacity and true will be returned. -func (t *adaptiveTokenBucket) Retrieve(amount float64) (available float64, retrieved bool) { - t.mu.Lock() - defer t.mu.Unlock() - - if amount > t.remainingTokens { - return t.remainingTokens, false - } - - t.remainingTokens -= amount - return t.remainingTokens, true -} - -// Refund returns the amount of tokens back to the available token bucket, up -// to the initial capacity. -func (t *adaptiveTokenBucket) Refund(amount float64) { - t.mu.Lock() - defer t.mu.Unlock() - - // Capacity cannot exceed max capacity. - t.remainingTokens = math.Min(t.remainingTokens+amount, t.maxCapacity) -} - -// Capacity returns the maximum capacity of tokens that the bucket could -// contain. -func (t *adaptiveTokenBucket) Capacity() float64 { - t.mu.Lock() - defer t.mu.Unlock() - - return t.maxCapacity -} - -// Remaining returns the number of tokens that remaining in the bucket. 
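The cubic curve used here is easier to see in isolation: on a throttle the rate drops to beta times the last known-good rate, then grows back along a cubic anchored at the throttle time. A self-contained sketch of the same arithmetic, assuming a starting rate of 10 requests per second:

```go
package main

import (
	"fmt"
	"math"
)

const (
	beta          = 0.7 // backoff factor applied on throttle
	scaleConstant = 0.4
)

// cubicThrottle and cubicSuccess mirror the deleted calculations.
func cubicThrottle(rate float64) float64 { return rate * beta }

func cubicSuccess(lastMaxRate, secondsSinceThrottle float64) float64 {
	timeWindow := math.Pow(lastMaxRate*(1-beta)/scaleConstant, 1.0/3.0)
	return scaleConstant*math.Pow(secondsSinceThrottle-timeWindow, 3) + lastMaxRate
}

func main() {
	lastMax := 10.0 // requests/sec observed when the throttle occurred
	fmt.Printf("after throttle: %.2f rps\n", cubicThrottle(lastMax)) // 7.00
	for _, dt := range []float64{1, 2, 3} {
		// Recovers toward and then past lastMax as time passes.
		fmt.Printf("t+%.0fs: %.2f rps\n", dt, cubicSuccess(lastMax, dt))
	}
}
```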
-func (t *adaptiveTokenBucket) Remaining() float64 { - t.mu.Lock() - defer t.mu.Unlock() - - return t.remainingTokens -} - -// Resize adjusts the size of the token bucket. Returns the capacity remaining. -func (t *adaptiveTokenBucket) Resize(size float64) float64 { - t.mu.Lock() - defer t.mu.Unlock() - - t.maxCapacity = math.Max(size, t.minCapacity) - - // Capacity needs to be capped at max capacity, if max size reduced. - t.remainingTokens = math.Min(t.remainingTokens, t.maxCapacity) - - return t.remainingTokens -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go deleted file mode 100644 index 3a08ebe0a7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go +++ /dev/null @@ -1,80 +0,0 @@ -// Package retry provides interfaces and implementations for SDK request retry behavior. -// -// # Retryer Interface and Implementations -// -// This package defines Retryer interface that is used to either implement custom retry behavior -// or to extend the existing retry implementations provided by the SDK. This package provides a single -// retry implementation: Standard. -// -// # Standard -// -// Standard is the default retryer implementation used by service clients. The standard retryer is a rate limited -// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs. -// In addition, the retryer uses a configurable token bucket to rate limit the retry attempts across the client, -// and uses an additional delay policy to limit the time between a requests subsequent attempts. -// -// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether -// a given error is retryable. By default this list of retryables includes the following: -// - Retrying errors that implement the RetryableError method, and return true. -// - Connection Errors -// - Errors that implement a ConnectionError, Temporary, or Timeout method that return true. -// - Connection Reset Errors. -// - net.OpErr types that are dialing errors or are temporary. -// - HTTP Status Codes: 500, 502, 503, and 504. -// - API Error Codes -// - RequestTimeout, RequestTimeoutException -// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException, -// RequestThrottled, SlowDown, EC2ThrottledException -// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException -// - TransactionInProgressException, PriorRequestNotComplete -// -// The standard retryer will not retry a request in the event if the context associated with the request -// has been cancelled. Applications must handle this case explicitly if they wish to retry with a different context -// value. -// -// You can configure the standard retryer implementation to fit your applications by constructing a standard retryer -// using the NewStandard function, and providing one more functional argument that mutate the StandardOptions -// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions, -// and the retry delay policy. 
-// -// For example to modify the default retry attempts for the standard retryer: -// -// // configure the custom retryer -// customRetry := retry.NewStandard(func(o *retry.StandardOptions) { -// o.MaxAttempts = 5 -// }) -// -// // create a service client with the retryer -// s3.NewFromConfig(cfg, func(o *s3.Options) { -// o.Retryer = customRetry -// }) -// -// # Utilities -// -// A number of package functions have been provided to easily wrap retryer implementations in an implementation agnostic -// way. These are: -// -// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable -// in addition to those considered retryable by the provided retryer. -// -// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping -// a retryer implementation. -// -// AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a -// request by wrapping a retryer implementation. -// -// The following package functions have been provided to easily satisfy different retry interfaces to further customize -// a given retryer's behavior: -// -// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example, -// you can use this method to easily create custom back off policies to be used with the -// standard retryer. -// -// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example, -// this can be used to extend the standard retryer to add additional logic to determine if an -// error should be retried. -// -// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example, -// this can be used to extend the standard retryer to add additional logic to determine if an -// error should be considered a timeout. -package retry diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go deleted file mode 100644 index 3e432eefe7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go +++ /dev/null @@ -1,20 +0,0 @@ -package retry - -import "fmt" - -// MaxAttemptsError provides the error when the maximum number of attempts have -// been exceeded. -type MaxAttemptsError struct { - Attempt int - Err error -} - -func (e *MaxAttemptsError) Error() string { - return fmt.Sprintf("exceeded maximum number of attempts, %d, %v", e.Attempt, e.Err) -} - -// Unwrap returns the nested error causing the max attempts error. Provides the -// implementation for errors.Is and errors.As to unwrap nested errors. -func (e *MaxAttemptsError) Unwrap() error { - return e.Err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go deleted file mode 100644 index c266996dea..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go +++ /dev/null @@ -1,49 +0,0 @@ -package retry - -import ( - "math" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/rand" - "github.com/aws/aws-sdk-go-v2/internal/timeconv" -) - -// ExponentialJitterBackoff provides backoff delays with jitter based on the -// number of attempts. -type ExponentialJitterBackoff struct { - maxBackoff time.Duration - // precomputed number of attempts needed to reach max backoff. 
- maxBackoffAttempts float64 - - randFloat64 func() (float64, error) -} - -// NewExponentialJitterBackoff returns an ExponentialJitterBackoff configured -// for the max backoff. -func NewExponentialJitterBackoff(maxBackoff time.Duration) *ExponentialJitterBackoff { - return &ExponentialJitterBackoff{ - maxBackoff: maxBackoff, - maxBackoffAttempts: math.Log2( - float64(maxBackoff) / float64(time.Second)), - randFloat64: rand.CryptoRandFloat64, - } -} - -// BackoffDelay returns the duration to wait before the next attempt should be -// made. Returns an error if unable get a duration. -func (j *ExponentialJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) { - if attempt > int(j.maxBackoffAttempts) { - return j.maxBackoff, nil - } - - b, err := j.randFloat64() - if err != nil { - return 0, err - } - - // [0.0, 1.0) * 2 ^ attempts - ri := int64(1 << uint64(attempt)) - delaySeconds := b * float64(ri) - - return timeconv.FloatSecondsDur(delaySeconds), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go deleted file mode 100644 index 7a3f183018..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go +++ /dev/null @@ -1,52 +0,0 @@ -package retry - -import ( - awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" -) - -// attemptResultsKey is a metadata accessor key to retrieve metadata -// for all request attempts. -type attemptResultsKey struct { -} - -// GetAttemptResults retrieves attempts results from middleware metadata. -func GetAttemptResults(metadata middleware.Metadata) (AttemptResults, bool) { - m, ok := metadata.Get(attemptResultsKey{}).(AttemptResults) - return m, ok -} - -// AttemptResults represents struct containing metadata returned by all request attempts. -type AttemptResults struct { - - // Results is a slice consisting attempt result from all request attempts. - // Results are stored in order request attempt is made. - Results []AttemptResult -} - -// AttemptResult represents attempt result returned by a single request attempt. -type AttemptResult struct { - - // Err is the error if received for the request attempt. - Err error - - // Retryable denotes if request may be retried. This states if an - // error is considered retryable. - Retryable bool - - // Retried indicates if this request was retried. - Retried bool - - // ResponseMetadata is any existing metadata passed via the response middlewares. 
-	ResponseMetadata middleware.Metadata
-}
-
-// addAttemptResults adds attempt results to middleware metadata
-func addAttemptResults(metadata *middleware.Metadata, v AttemptResults) {
-	metadata.Set(attemptResultsKey{}, v)
-}
-
-// GetRawResponse returns raw response recorded for the attempt result
-func (a AttemptResult) GetRawResponse() interface{} {
-	return awsmiddle.GetRawResponse(a.ResponseMetadata)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
deleted file mode 100644
index dc703d482d..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
+++ /dev/null
@@ -1,340 +0,0 @@
-package retry
-
-import (
-	"context"
-	"fmt"
-	"github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/aws/aws-sdk-go-v2/aws"
-	awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
-	"github.com/aws/aws-sdk-go-v2/internal/sdk"
-	"github.com/aws/smithy-go/logging"
-	smithymiddle "github.com/aws/smithy-go/middleware"
-	"github.com/aws/smithy-go/transport/http"
-)
-
-// RequestCloner is a function that can take an input request type and clone
-// the request for use in a subsequent retry attempt.
-type RequestCloner func(interface{}) interface{}
-
-type retryMetadata struct {
-	AttemptNum       int
-	AttemptTime      time.Time
-	MaxAttempts      int
-	AttemptClockSkew time.Duration
-}
-
-// Attempt is a Smithy Finalize middleware that handles retry attempts using
-// the provided Retryer implementation.
-type Attempt struct {
-	// Enable the logging of retry attempts performed by the SDK. This will
-	// include logging retry attempts, unretryable errors, and when max
-	// attempts are reached.
-	LogAttempts bool
-
-	retryer       aws.RetryerV2
-	requestCloner RequestCloner
-}
-
-// NewAttemptMiddleware returns a new Attempt retry middleware.
-func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt {
-	m := &Attempt{
-		retryer:       wrapAsRetryerV2(retryer),
-		requestCloner: requestCloner,
-	}
-	for _, fn := range optFns {
-		fn(m)
-	}
-	return m
-}
-
-// ID returns the middleware identifier
-func (r *Attempt) ID() string { return "Retry" }
-
-func (r Attempt) logf(logger logging.Logger, classification logging.Classification, format string, v ...interface{}) {
-	if !r.LogAttempts {
-		return
-	}
-	logger.Logf(classification, format, v...)
-}
-
-// HandleFinalize utilizes the provided Retryer implementation to attempt
-// retries over the next handler
-func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
-	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
-) {
-	var attemptNum int
-	var attemptClockSkew time.Duration
-	var attemptResults AttemptResults
-
-	maxAttempts := r.retryer.MaxAttempts()
-	releaseRetryToken := nopRelease
-
-	for {
-		attemptNum++
-		attemptInput := in
-		attemptInput.Request = r.requestCloner(attemptInput.Request)
-
-		// Record the metadata for the attempt being started.
-		attemptCtx := setRetryMetadata(ctx, retryMetadata{
-			AttemptNum:       attemptNum,
-			AttemptTime:      sdk.NowTime().UTC(),
-			MaxAttempts:      maxAttempts,
-			AttemptClockSkew: attemptClockSkew,
-		})
-
-		var attemptResult AttemptResult
-		out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next)
-		attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata)
-
-		// AttemptResult Retried states that the attempt was not successful, and
-		// should be retried.
-		shouldRetry := attemptResult.Retried
-
-		// Add attempt metadata to list of all attempt metadata
-		attemptResults.Results = append(attemptResults.Results, attemptResult)
-
-		if !shouldRetry {
-			// Ensure the last response's metadata is used as the basis for result
-			// metadata returned by the stack. The slice of attempt results
-			// will be added to this cloned metadata.
-			metadata = attemptResult.ResponseMetadata.Clone()
-
-			break
-		}
-	}
-
-	addAttemptResults(&metadata, attemptResults)
-	return out, metadata, err
-}
-
-// handleAttempt handles an individual request attempt.
-func (r *Attempt) handleAttempt(
-	ctx context.Context, in smithymiddle.FinalizeInput, releaseRetryToken func(error) error, next smithymiddle.FinalizeHandler,
-) (
-	out smithymiddle.FinalizeOutput, attemptResult AttemptResult, _ func(error) error, err error,
-) {
-	defer func() {
-		attemptResult.Err = err
-	}()
-
-	// Short circuit if this attempt never can succeed because the context is
-	// canceled. This reduces the chance of token pools being modified for
-	// attempts that will not be made
-	select {
-	case <-ctx.Done():
-		return out, attemptResult, nopRelease, ctx.Err()
-	default:
-	}
-
-	//------------------------------
-	// Get Attempt Token
-	//------------------------------
-	releaseAttemptToken, err := r.retryer.GetAttemptToken(ctx)
-	if err != nil {
-		return out, attemptResult, nopRelease, fmt.Errorf(
-			"failed to get retry Send token, %w", err)
-	}
-
-	//------------------------------
-	// Send Attempt
-	//------------------------------
-	logger := smithymiddle.GetLogger(ctx)
-	service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx)
-	retryMetadata, _ := getRetryMetadata(ctx)
-	attemptNum := retryMetadata.AttemptNum
-	maxAttempts := retryMetadata.MaxAttempts
-
-	// Following attempts must ensure the request payload stream starts in a
-	// rewound state.
-	if attemptNum > 1 {
-		if rewindable, ok := in.Request.(interface{ RewindStream() error }); ok {
-			if rewindErr := rewindable.RewindStream(); rewindErr != nil {
-				return out, attemptResult, nopRelease, fmt.Errorf(
-					"failed to rewind transport stream for retry, %w", rewindErr)
-			}
-		}
-
-		r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d",
-			service, operation, attemptNum)
-	}
-
-	var metadata smithymiddle.Metadata
-	out, metadata, err = next.HandleFinalize(ctx, in)
-	attemptResult.ResponseMetadata = metadata
-
-	//------------------------------
-	// Bookkeeping
-	//------------------------------
-	// Release the retry token based on the state of the attempt's error (if any).
-	if releaseError := releaseRetryToken(err); releaseError != nil && err != nil {
-		return out, attemptResult, nopRelease, fmt.Errorf(
-			"failed to release retry token after request error, %w", err)
-	}
-	// Release the attempt token based on the state of the attempt's error (if any).
-	if releaseError := releaseAttemptToken(err); releaseError != nil && err != nil {
-		return out, attemptResult, nopRelease, fmt.Errorf(
-			"failed to release initial token after request error, %w", err)
-	}
-	// If there was no error making the attempt, nothing further to do. There
-	// will be nothing to retry.
-	if err == nil {
-		return out, attemptResult, nopRelease, err
-	}
-
-	//------------------------------
-	// Is Retryable and Should Retry
-	//------------------------------
-	// If the attempt failed with an unretryable error, nothing further to do
-	// but return, and inform the caller about the terminal failure.
-	retryable := r.retryer.IsErrorRetryable(err)
-	if !retryable {
-		r.logf(logger, logging.Debug, "request failed with unretryable error %v", err)
-		return out, attemptResult, nopRelease, err
-	}
-
-	// set retryable to true
-	attemptResult.Retryable = true
-
-	// Once the maximum number of attempts have been exhausted there is nothing
-	// further to do other than inform the caller about the terminal failure.
-	if maxAttempts > 0 && attemptNum >= maxAttempts {
-		r.logf(logger, logging.Debug, "max retry attempts exhausted, max %d", maxAttempts)
-		err = &MaxAttemptsError{
-			Attempt: attemptNum,
-			Err:     err,
-		}
-		return out, attemptResult, nopRelease, err
-	}
-
-	//------------------------------
-	// Get Retry (aka Retry Quota) Token
-	//------------------------------
-	// Get a retry token that will be released after the attempt completes.
-	releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err)
-	if retryTokenErr != nil {
-		return out, attemptResult, nopRelease, retryTokenErr
-	}
-
-	//------------------------------
-	// Retry Delay and Sleep
-	//------------------------------
-	// Get the retry delay before another attempt can be made, and sleep for
-	// that time. Potentially exit early if the sleep is canceled via the
-	// context.
-	retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err)
-	mctx := metrics.Context(ctx)
-	if mctx != nil {
-		attempt, err := mctx.Data().LatestAttempt()
-		// Record the delay only when the attempt lookup succeeded.
-		if err == nil {
-			attempt.RetryDelay = retryDelay
-		}
-	}
-	if reqErr != nil {
-		return out, attemptResult, releaseRetryToken, reqErr
-	}
-	if reqErr = sdk.SleepWithContext(ctx, retryDelay); reqErr != nil {
-		err = &aws.RequestCanceledError{Err: reqErr}
-		return out, attemptResult, releaseRetryToken, err
-	}
-
-	// The request should be re-attempted.
-	attemptResult.Retried = true
-
-	return out, attemptResult, releaseRetryToken, err
-}
-
-// MetricsHeader attaches SDK request metric header for retries to the transport
-type MetricsHeader struct{}
-
-// ID returns the middleware identifier
-func (r *MetricsHeader) ID() string {
-	return "RetryMetricsHeader"
-}
-
-// HandleFinalize attaches the SDK request metric header to the transport layer
-func (r MetricsHeader) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
-	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
-) {
-	retryMetadata, _ := getRetryMetadata(ctx)
-
-	const retryMetricHeader = "Amz-Sdk-Request"
-	var parts []string
-
-	parts = append(parts, "attempt="+strconv.Itoa(retryMetadata.AttemptNum))
-	if retryMetadata.MaxAttempts != 0 {
-		parts = append(parts, "max="+strconv.Itoa(retryMetadata.MaxAttempts))
-	}
-
-	var ttl time.Time
-	if deadline, ok := ctx.Deadline(); ok {
-		ttl = deadline
-	}
-
-	// Only append the TTL if it can be determined.
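	// Illustration: for the second of three attempts with a known context
	// deadline, the finished header would read, e.g.:
	//
	//	Amz-Sdk-Request: attempt=2; max=3; ttl=20240514T221105Z
	//
	// where the ttl timestamp (format 20060102T150405Z) is the context
	// deadline adjusted by the measured clock skew. The ttl part is omitted
	// when no deadline is set or no positive clock skew has been measured.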
- if !ttl.IsZero() && retryMetadata.AttemptClockSkew > 0 { - const unixTimeFormat = "20060102T150405Z" - ttl = ttl.Add(retryMetadata.AttemptClockSkew) - parts = append(parts, "ttl="+ttl.Format(unixTimeFormat)) - } - - switch req := in.Request.(type) { - case *http.Request: - req.Header[retryMetricHeader] = append(req.Header[retryMetricHeader][:0], strings.Join(parts, "; ")) - default: - return out, metadata, fmt.Errorf("unknown transport type %T", req) - } - - return next.HandleFinalize(ctx, in) -} - -type retryMetadataKey struct{} - -// getRetryMetadata retrieves retryMetadata from the context and a bool -// indicating if it was set. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) { - metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata) - return metadata, ok -} - -// setRetryMetadata sets the retryMetadata on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context { - return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata) -} - -// AddRetryMiddlewaresOptions is the set of options that can be passed to -// AddRetryMiddlewares for configuring retry associated middleware. -type AddRetryMiddlewaresOptions struct { - Retryer aws.Retryer - - // Enable the logging of retry attempts performed by the SDK. This will - // include logging retry attempts, unretryable errors, and when max - // attempts are reached. - LogRetryAttempts bool -} - -// AddRetryMiddlewares adds retry middleware to operation middleware stack -func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresOptions) error { - attempt := NewAttemptMiddleware(options.Retryer, http.RequestCloner, func(middleware *Attempt) { - middleware.LogAttempts = options.LogRetryAttempts - }) - - // index retry to before signing, if signing exists - if err := stack.Finalize.Insert(attempt, "Signing", smithymiddle.Before); err != nil { - return err - } - - if err := stack.Finalize.Insert(&MetricsHeader{}, attempt.ID(), smithymiddle.After); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go deleted file mode 100644 index af81635b3f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go +++ /dev/null @@ -1,90 +0,0 @@ -package retry - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// AddWithErrorCodes returns a Retryer with additional error codes considered -// for determining if the error should be retried. 
-func AddWithErrorCodes(r aws.Retryer, codes ...string) aws.Retryer {
-	retryable := &RetryableErrorCode{
-		Codes: map[string]struct{}{},
-	}
-	for _, c := range codes {
-		retryable.Codes[c] = struct{}{}
-	}
-
-	return &withIsErrorRetryable{
-		RetryerV2: wrapAsRetryerV2(r),
-		Retryable: retryable,
-	}
-}
-
-type withIsErrorRetryable struct {
-	aws.RetryerV2
-	Retryable IsErrorRetryable
-}
-
-func (r *withIsErrorRetryable) IsErrorRetryable(err error) bool {
-	if v := r.Retryable.IsErrorRetryable(err); v != aws.UnknownTernary {
-		return v.Bool()
-	}
-	return r.RetryerV2.IsErrorRetryable(err)
-}
-
-// AddWithMaxAttempts returns a Retryer with MaxAttempts set to the value
-// specified.
-func AddWithMaxAttempts(r aws.Retryer, max int) aws.Retryer {
-	return &withMaxAttempts{
-		RetryerV2: wrapAsRetryerV2(r),
-		Max:       max,
-	}
-}
-
-type withMaxAttempts struct {
-	aws.RetryerV2
-	Max int
-}
-
-func (w *withMaxAttempts) MaxAttempts() int {
-	return w.Max
-}
-
-// AddWithMaxBackoffDelay returns a retryer wrapping the passed-in retryer,
-// overriding the RetryDelay behavior with an alternate maximum backoff delay.
-func AddWithMaxBackoffDelay(r aws.Retryer, delay time.Duration) aws.Retryer {
-	return &withMaxBackoffDelay{
-		RetryerV2: wrapAsRetryerV2(r),
-		backoff:   NewExponentialJitterBackoff(delay),
-	}
-}
-
-type withMaxBackoffDelay struct {
-	aws.RetryerV2
-	backoff *ExponentialJitterBackoff
-}
-
-func (r *withMaxBackoffDelay) RetryDelay(attempt int, err error) (time.Duration, error) {
-	return r.backoff.BackoffDelay(attempt, err)
-}
-
-type wrappedAsRetryerV2 struct {
-	aws.Retryer
-}
-
-func wrapAsRetryerV2(r aws.Retryer) aws.RetryerV2 {
-	v, ok := r.(aws.RetryerV2)
-	if !ok {
-		v = wrappedAsRetryerV2{Retryer: r}
-	}
-
-	return v
-}
-
-func (w wrappedAsRetryerV2) GetAttemptToken(context.Context) (func(error) error, error) {
-	return w.Retryer.GetInitialToken(), nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
deleted file mode 100644
index 987affdde6..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package retry
-
-import (
-	"errors"
-	"net"
-	"net/url"
-	"strings"
-
-	"github.com/aws/aws-sdk-go-v2/aws"
-)
-
-// IsErrorRetryable provides the interface of an implementation to determine if
-// an error as the result of an operation is retryable.
-type IsErrorRetryable interface {
-	IsErrorRetryable(error) aws.Ternary
-}
-
-// IsErrorRetryables is a collection of checks to determine if the error is
-// retryable. Iterates through the checks and returns the state of retryable
-// if any check returns something other than unknown.
-type IsErrorRetryables []IsErrorRetryable
-
-// IsErrorRetryable returns if the error is retryable if any of the checks in
-// the list return a value other than unknown.
-func (r IsErrorRetryables) IsErrorRetryable(err error) aws.Ternary {
-	for _, re := range r {
-		if v := re.IsErrorRetryable(err); v != aws.UnknownTernary {
-			return v
-		}
-	}
-	return aws.UnknownTernary
-}
-
-// IsErrorRetryableFunc wraps a function with the IsErrorRetryable interface.
-type IsErrorRetryableFunc func(error) aws.Ternary
-
-// IsErrorRetryable returns if the error is retryable.
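These wrappers compose; a sketch of stacking them on a standard retryer while loading SDK configuration (the error code chosen is illustrative):

    import (
        "context"
        "time"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/aws/retry"
        "github.com/aws/aws-sdk-go-v2/config"
    )

    func loadConfig(ctx context.Context) (aws.Config, error) {
        return config.LoadDefaultConfig(ctx, config.WithRetryer(func() aws.Retryer {
            var r aws.Retryer = retry.NewStandard()
            r = retry.AddWithMaxAttempts(r, 5)
            r = retry.AddWithMaxBackoffDelay(r, 10*time.Second)
            return retry.AddWithErrorCodes(r, "ProvisionedThroughputExceededException")
        }))
    }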
-func (fn IsErrorRetryableFunc) IsErrorRetryable(err error) aws.Ternary {
-	return fn(err)
-}
-
-// RetryableError is an IsErrorRetryable implementation which uses the
-// optional interface Retryable on the error value to determine if the error is
-// retryable.
-type RetryableError struct{}
-
-// IsErrorRetryable returns if the error is retryable if it satisfies the
-// Retryable interface, and returns if the attempt should be retried.
-func (RetryableError) IsErrorRetryable(err error) aws.Ternary {
-	var v interface{ RetryableError() bool }
-
-	if !errors.As(err, &v) {
-		return aws.UnknownTernary
-	}
-
-	return aws.BoolTernary(v.RetryableError())
-}
-
-// NoRetryCanceledError detects if the error was a request canceled error and
-// returns if so.
-type NoRetryCanceledError struct{}
-
-// IsErrorRetryable returns the error is not retryable if the request was
-// canceled.
-func (NoRetryCanceledError) IsErrorRetryable(err error) aws.Ternary {
-	var v interface{ CanceledError() bool }
-
-	if !errors.As(err, &v) {
-		return aws.UnknownTernary
-	}
-
-	if v.CanceledError() {
-		return aws.FalseTernary
-	}
-	return aws.UnknownTernary
-}
-
-// RetryableConnectionError determines if the underlying error is an HTTP
-// connection error and returns if it should be retried.
-//
-// Includes errors such as connection reset, connection refused, net dial,
-// temporary, and timeout errors.
-type RetryableConnectionError struct{}
-
-// IsErrorRetryable returns if the error is caused by an HTTP connection
-// error, and should be retried.
-func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary {
-	if err == nil {
-		return aws.UnknownTernary
-	}
-	var retryable bool
-
-	var conErr interface{ ConnectionError() bool }
-	var tempErr interface{ Temporary() bool }
-	var timeoutErr interface{ Timeout() bool }
-	var urlErr *url.Error
-	var netOpErr *net.OpError
-	var dnsError *net.DNSError
-
-	if errors.As(err, &dnsError) {
-		// NXDOMAIN errors should not be retried
-		if dnsError.IsNotFound {
-			return aws.BoolTernary(false)
-		}
-
-		// if !dnsError.Temporary(), error may or may not be temporary,
-		// (i.e. !Temporary() =/=> !retryable) so we should fall through to
-		// remaining checks
-		if dnsError.Temporary() {
-			return aws.BoolTernary(true)
-		}
-	}
-
-	switch {
-	case errors.As(err, &conErr) && conErr.ConnectionError():
-		retryable = true
-
-	case strings.Contains(err.Error(), "connection reset"):
-		retryable = true
-
-	case errors.As(err, &urlErr):
-		// Refused connections should be retried as the service may not yet be
-		// running on the port. Go TCP dial considers refused connections as
-		// not temporary.
-		if strings.Contains(urlErr.Error(), "connection refused") {
-			retryable = true
-		} else {
-			return r.IsErrorRetryable(errors.Unwrap(urlErr))
-		}
-
-	case errors.As(err, &netOpErr):
-		// Network dial, or temporary network errors are always retryable.
-		if strings.EqualFold(netOpErr.Op, "dial") || netOpErr.Temporary() {
-			retryable = true
-		} else {
-			return r.IsErrorRetryable(errors.Unwrap(netOpErr))
-		}
-
-	case errors.As(err, &tempErr) && tempErr.Temporary():
-		// Fallback to the generic temporary check, with temporary errors
-		// retryable.
-		retryable = true
-
-	case errors.As(err, &timeoutErr) && timeoutErr.Timeout():
-		// Fallback to the generic timeout check, with timeout errors
-		// retryable.
-		retryable = true
-
-	default:
-		return aws.UnknownTernary
-	}
-
-	return aws.BoolTernary(retryable)
-}
-
-// RetryableHTTPStatusCode provides an IsErrorRetryable based on HTTP status
-// codes.
-type RetryableHTTPStatusCode struct {
-	Codes map[int]struct{}
-}
-
-// IsErrorRetryable returns if the passed-in error is retryable based on the
-// HTTP status code.
-func (r RetryableHTTPStatusCode) IsErrorRetryable(err error) aws.Ternary {
-	var v interface{ HTTPStatusCode() int }
-
-	if !errors.As(err, &v) {
-		return aws.UnknownTernary
-	}
-
-	_, ok := r.Codes[v.HTTPStatusCode()]
-	if !ok {
-		return aws.UnknownTernary
-	}
-
-	return aws.TrueTernary
-}
-
-// RetryableErrorCode determines if an attempt should be retried based on the
-// API error code.
-type RetryableErrorCode struct {
-	Codes map[string]struct{}
-}
-
-// IsErrorRetryable returns if the error is retryable based on the error codes.
-// Returns unknown if the error doesn't have a code or it is unknown.
-func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary {
-	var v interface{ ErrorCode() string }
-
-	if !errors.As(err, &v) {
-		return aws.UnknownTernary
-	}
-
-	_, ok := r.Codes[v.ErrorCode()]
-	if !ok {
-		return aws.UnknownTernary
-	}
-
-	return aws.TrueTernary
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
deleted file mode 100644
index d5ea93222e..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
+++ /dev/null
@@ -1,269 +0,0 @@
-package retry
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
-)
-
-// BackoffDelayer provides the interface for determining the backoff delay to
-// wait before making another attempt for a request that previously failed.
-type BackoffDelayer interface {
-	BackoffDelay(attempt int, err error) (time.Duration, error)
-}
-
-// BackoffDelayerFunc provides a wrapper around a function to determine the
-// backoff delay of an attempt retry.
-type BackoffDelayerFunc func(int, error) (time.Duration, error)
-
-// BackoffDelay returns the delay before an attempt to retry a request.
-func (fn BackoffDelayerFunc) BackoffDelay(attempt int, err error) (time.Duration, error) {
-	return fn(attempt, err)
-}
-
-const (
-	// DefaultMaxAttempts is the maximum number of attempts for an API request
-	DefaultMaxAttempts int = 3
-
-	// DefaultMaxBackoff is the maximum backoff delay between attempts
-	DefaultMaxBackoff time.Duration = 20 * time.Second
-)
-
-// Default retry token quota values.
-const (
-	DefaultRetryRateTokens  uint = 500
-	DefaultRetryCost        uint = 5
-	DefaultRetryTimeoutCost uint = 10
-	DefaultNoRetryIncrement uint = 1
-)
-
-// DefaultRetryableHTTPStatusCodes is the default set of HTTP status codes the SDK
-// should consider as retryable errors.
-var DefaultRetryableHTTPStatusCodes = map[int]struct{}{
-	500: {},
-	502: {},
-	503: {},
-	504: {},
-}
-
-// DefaultRetryableErrorCodes provides the set of API error codes that should
-// be retried.
-var DefaultRetryableErrorCodes = map[string]struct{}{
-	"RequestTimeout":          {},
-	"RequestTimeoutException": {},
-}
-
-// DefaultThrottleErrorCodes provides the set of API error codes that are
-// considered throttle errors.
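A sketch of layering one more status-code check on top of the defaults, via the StandardOptions hook defined below (HTTP 429 is not in the default retryable set):

    import "github.com/aws/aws-sdk-go-v2/aws/retry"

    func newRetryerWith429() *retry.Standard {
        return retry.NewStandard(func(o *retry.StandardOptions) {
            // Additionally retry HTTP 429 (Too Many Requests) responses.
            o.Retryables = append(o.Retryables, retry.RetryableHTTPStatusCode{
                Codes: map[int]struct{}{429: {}},
            })
        })
    }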
-var DefaultThrottleErrorCodes = map[string]struct{}{
-	"Throttling":                             {},
-	"ThrottlingException":                    {},
-	"ThrottledException":                     {},
-	"RequestThrottledException":              {},
-	"TooManyRequestsException":               {},
-	"ProvisionedThroughputExceededException": {},
-	"TransactionInProgressException":         {},
-	"RequestLimitExceeded":                   {},
-	"BandwidthLimitExceeded":                 {},
-	"LimitExceededException":                 {},
-	"RequestThrottled":                       {},
-	"SlowDown":                               {},
-	"PriorRequestNotComplete":                {},
-	"EC2ThrottledException":                  {},
-}
-
-// DefaultRetryables provides the set of retryable checks that are used by
-// default.
-var DefaultRetryables = []IsErrorRetryable{
-	NoRetryCanceledError{},
-	RetryableError{},
-	RetryableConnectionError{},
-	RetryableHTTPStatusCode{
-		Codes: DefaultRetryableHTTPStatusCodes,
-	},
-	RetryableErrorCode{
-		Codes: DefaultRetryableErrorCodes,
-	},
-	RetryableErrorCode{
-		Codes: DefaultThrottleErrorCodes,
-	},
-}
-
-// DefaultTimeouts provides the set of timeout checks that are used by default.
-var DefaultTimeouts = []IsErrorTimeout{
-	TimeouterError{},
-}
-
-// StandardOptions provides the functional options for configuring the standard
-// retryable, and delay behavior.
-type StandardOptions struct {
-	// Maximum number of attempts that should be made.
-	MaxAttempts int
-
-	// MaxBackoff duration between retried attempts.
-	MaxBackoff time.Duration
-
-	// Provides the backoff strategy the retryer will use to determine the
-	// delay between retry attempts.
-	Backoff BackoffDelayer
-
-	// Set of strategies to determine if the attempt should be retried based on
-	// the error response received.
-	//
-	// It is safe to append to this list in NewStandard's functional options.
-	Retryables []IsErrorRetryable
-
-	// Set of strategies to determine if the attempt failed due to a timeout
-	// error.
-	//
-	// It is safe to append to this list in NewStandard's functional options.
-	Timeouts []IsErrorTimeout
-
-	// Provides the rate limiting strategy for rate limiting attempt retries
-	// across all attempts the retryer is being used with.
-	//
-	// A RateLimiter operates as a token bucket with a set capacity, where
-	// attempt failure events consume tokens. A retry attempt that attempts to
-	// consume more tokens than what's available results in operation failure.
-	// The default implementation is parameterized as follows:
-	//   - a capacity of 500 (DefaultRetryRateTokens)
-	//   - a retry caused by a timeout costs 10 tokens (DefaultRetryTimeoutCost)
-	//   - a retry caused by other errors costs 5 tokens (DefaultRetryCost)
-	//   - an operation that succeeds on the 1st attempt adds 1 token (DefaultNoRetryIncrement)
-	//
-	// You can disable rate limiting by setting this field to ratelimit.None.
-	RateLimiter RateLimiter
-
-	// The cost to deduct from the RateLimiter's token bucket per retry.
-	RetryCost uint
-
-	// The cost to deduct from the RateLimiter's token bucket per retry caused
-	// by timeout error.
-	RetryTimeoutCost uint
-
-	// The cost to payback to the RateLimiter's token bucket for successful
-	// attempts.
-	NoRetryIncrement uint
-}
-
-// RateLimiter provides the interface for limiting the rate of attempt retries
-// allowed by the retryer.
-type RateLimiter interface {
-	GetToken(ctx context.Context, cost uint) (releaseToken func() error, err error)
-	AddTokens(uint) error
-}
-
-// Standard is the standard retry pattern for the SDK. It uses a set of
-// retryable checks to determine if the failed attempt should be retried, and
-// what retry delay should be used.
-type Standard struct {
-	options StandardOptions
-
-	timeout   IsErrorTimeout
-	retryable IsErrorRetryable
-	backoff   BackoffDelayer
-}
-
-// NewStandard initializes a standard retry behavior with defaults that can be
-// overridden via functional options.
-func NewStandard(fnOpts ...func(*StandardOptions)) *Standard {
-	o := StandardOptions{
-		MaxAttempts: DefaultMaxAttempts,
-		MaxBackoff:  DefaultMaxBackoff,
-		Retryables:  append([]IsErrorRetryable{}, DefaultRetryables...),
-		Timeouts:    append([]IsErrorTimeout{}, DefaultTimeouts...),
-
-		RateLimiter:      ratelimit.NewTokenRateLimit(DefaultRetryRateTokens),
-		RetryCost:        DefaultRetryCost,
-		RetryTimeoutCost: DefaultRetryTimeoutCost,
-		NoRetryIncrement: DefaultNoRetryIncrement,
-	}
-	for _, fn := range fnOpts {
-		fn(&o)
-	}
-	if o.MaxAttempts <= 0 {
-		o.MaxAttempts = DefaultMaxAttempts
-	}
-
-	backoff := o.Backoff
-	if backoff == nil {
-		backoff = NewExponentialJitterBackoff(o.MaxBackoff)
-	}
-
-	return &Standard{
-		options:   o,
-		backoff:   backoff,
-		retryable: IsErrorRetryables(o.Retryables),
-		timeout:   IsErrorTimeouts(o.Timeouts),
-	}
-}
-
-// MaxAttempts returns the maximum number of attempts that can be made for a
-// request before failing.
-func (s *Standard) MaxAttempts() int {
-	return s.options.MaxAttempts
-}
-
-// IsErrorRetryable returns whether the error can be retried. Should not
-// consider the number of attempts made.
-func (s *Standard) IsErrorRetryable(err error) bool {
-	return s.retryable.IsErrorRetryable(err).Bool()
-}
-
-// RetryDelay returns the delay to use before another request attempt is made.
-func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) {
-	return s.backoff.BackoffDelay(attempt, err)
-}
-
-// GetAttemptToken returns the token to be released after the attempt completes.
-// The release token will add NoRetryIncrement to the RateLimiter token pool if
-// the attempt was successful. If the attempt failed, nothing will be done.
-func (s *Standard) GetAttemptToken(context.Context) (func(error) error, error) {
-	return s.GetInitialToken(), nil
-}
-
-// GetInitialToken returns a token for adding the NoRetryIncrement to the
-// RateLimiter token if the attempt completed successfully without error.
-//
-// InitialToken applies to the result of each attempt, including the first.
-// Whereas the RetryToken applies to the result of subsequent attempts.
-//
-// Deprecated: use GetAttemptToken instead.
-func (s *Standard) GetInitialToken() func(error) error {
-	return releaseToken(s.noRetryIncrement).release
-}
-
-func (s *Standard) noRetryIncrement() error {
-	return s.options.RateLimiter.AddTokens(s.options.NoRetryIncrement)
-}
-
-// GetRetryToken attempts to deduct the retry cost from the retry token pool.
-// Returning the token release function, or error.
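The token-bucket quota described above can be tuned or disabled entirely through the same functional options; a sketch:

    import (
        "time"

        "github.com/aws/aws-sdk-go-v2/aws/ratelimit"
        "github.com/aws/aws-sdk-go-v2/aws/retry"
    )

    func newUnthrottledRetryer() *retry.Standard {
        return retry.NewStandard(func(o *retry.StandardOptions) {
            o.RateLimiter = ratelimit.None // disable the client-side retry quota
            o.MaxAttempts = 10
            o.MaxBackoff = 30 * time.Second
        })
    }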
-func (s *Standard) GetRetryToken(ctx context.Context, opErr error) (func(error) error, error) {
-	cost := s.options.RetryCost
-
-	if s.timeout.IsErrorTimeout(opErr).Bool() {
-		cost = s.options.RetryTimeoutCost
-	}
-
-	fn, err := s.options.RateLimiter.GetToken(ctx, cost)
-	if err != nil {
-		return nil, fmt.Errorf("failed to get rate limit token, %w", err)
-	}
-
-	return releaseToken(fn).release, nil
-}
-
-func nopRelease(error) error { return nil }
-
-type releaseToken func() error
-
-func (f releaseToken) release(err error) error {
-	if err != nil {
-		return nil
-	}
-
-	return f()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go
deleted file mode 100644
index c4b844d15f..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package retry
-
-import (
-	"errors"
-
-	"github.com/aws/aws-sdk-go-v2/aws"
-)
-
-// IsErrorThrottle provides the interface of an implementation to determine if
-// an error response from an operation is a throttling error.
-type IsErrorThrottle interface {
-	IsErrorThrottle(error) aws.Ternary
-}
-
-// IsErrorThrottles is a collection of checks to determine if the error is a
-// throttle error. Iterates through the checks and returns the state of
-// throttle if any check returns something other than unknown.
-type IsErrorThrottles []IsErrorThrottle
-
-// IsErrorThrottle returns if the error is a throttle error if any of the
-// checks in the list return a value other than unknown.
-func (r IsErrorThrottles) IsErrorThrottle(err error) aws.Ternary {
-	for _, re := range r {
-		if v := re.IsErrorThrottle(err); v != aws.UnknownTernary {
-			return v
-		}
-	}
-	return aws.UnknownTernary
-}
-
-// IsErrorThrottleFunc wraps a function with the IsErrorThrottle interface.
-type IsErrorThrottleFunc func(error) aws.Ternary
-
-// IsErrorThrottle returns if the error is a throttle error.
-func (fn IsErrorThrottleFunc) IsErrorThrottle(err error) aws.Ternary {
-	return fn(err)
-}
-
-// ThrottleErrorCode determines if an attempt should be retried based on the
-// API error code.
-type ThrottleErrorCode struct {
-	Codes map[string]struct{}
-}
-
-// IsErrorThrottle returns if the error is a throttle error based on the error
-// codes. Returns unknown if the error doesn't have a code or it is unknown.
-func (r ThrottleErrorCode) IsErrorThrottle(err error) aws.Ternary {
-	var v interface{ ErrorCode() string }
-
-	if !errors.As(err, &v) {
-		return aws.UnknownTernary
-	}
-
-	_, ok := r.Codes[v.ErrorCode()]
-	if !ok {
-		return aws.UnknownTernary
-	}
-
-	return aws.TrueTernary
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
deleted file mode 100644
index 3d47870d2d..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package retry
-
-import (
-	"errors"
-
-	"github.com/aws/aws-sdk-go-v2/aws"
-)
-
-// IsErrorTimeout provides the interface of an implementation to determine if
-// an error is a timeout error.
-type IsErrorTimeout interface {
-	IsErrorTimeout(err error) aws.Ternary
-}
-
-// IsErrorTimeouts is a collection of checks to determine if the error is a
-// timeout. Iterates through the checks and returns the state of timeout
-// if any check returns something other than unknown.
-type IsErrorTimeouts []IsErrorTimeout
-
-// IsErrorTimeout returns if the error is a timeout if any of the checks in
-// the list return a value other than unknown.
-func (ts IsErrorTimeouts) IsErrorTimeout(err error) aws.Ternary {
-	for _, t := range ts {
-		if v := t.IsErrorTimeout(err); v != aws.UnknownTernary {
-			return v
-		}
-	}
-	return aws.UnknownTernary
-}
-
-// IsErrorTimeoutFunc wraps a function with the IsErrorTimeout interface.
-type IsErrorTimeoutFunc func(error) aws.Ternary
-
-// IsErrorTimeout returns if the error is a timeout.
-func (fn IsErrorTimeoutFunc) IsErrorTimeout(err error) aws.Ternary {
-	return fn(err)
-}
-
-// TimeouterError provides the IsErrorTimeout implementation for determining if
-// an error is a timeout based on type with the Timeout method.
-type TimeouterError struct{}
-
-// IsErrorTimeout returns if the error is a timeout error.
-func (t TimeouterError) IsErrorTimeout(err error) aws.Ternary {
-	var v interface{ Timeout() bool }
-
-	if !errors.As(err, &v) {
-		return aws.UnknownTernary
-	}
-
-	return aws.BoolTernary(v.Timeout())
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
deleted file mode 100644
index b0ba4cb2f0..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package aws
-
-import (
-	"context"
-	"fmt"
-	"time"
-)
-
-// RetryMode provides the mode the API client will use to create a retryer
-// based on.
-type RetryMode string
-
-const (
-	// RetryModeStandard model provides rate limited retry attempts with
-	// exponential backoff delay.
-	RetryModeStandard RetryMode = "standard"
-
-	// RetryModeAdaptive model provides attempt send rate limiting on throttle
-	// responses in addition to standard mode's retry rate limiting.
-	//
-	// Adaptive retry mode is experimental and is subject to change in the
-	// future.
-	RetryModeAdaptive RetryMode = "adaptive"
-)
-
-// ParseRetryMode attempts to parse a RetryMode from the given string.
-// Returning error if the value is not a known RetryMode.
-func ParseRetryMode(v string) (mode RetryMode, err error) {
-	switch v {
-	case "standard":
-		return RetryModeStandard, nil
-	case "adaptive":
-		return RetryModeAdaptive, nil
-	default:
-		return mode, fmt.Errorf("unknown RetryMode, %v", v)
-	}
-}
-
-func (m RetryMode) String() string { return string(m) }
-
-// Retryer is an interface to determine if a given error from an
-// attempt should be retried, and if so what backoff delay to apply. The
-// default implementation used by most services is the retry package's Standard
-// type. Which contains basic retry logic using exponential backoff.
-type Retryer interface {
-	// IsErrorRetryable returns if the failed attempt is retryable. This check
-	// should determine if the error can be retried, or if the error is
-	// terminal.
-	IsErrorRetryable(error) bool
-
-	// MaxAttempts returns the maximum number of attempts that can be made for
-	// an attempt before failing. A value of 0 implies that the attempt should
-	// be retried until it succeeds if the errors are retryable.
-	MaxAttempts() int
-
-	// RetryDelay returns the delay that should be used before retrying the
-	// attempt. Will return error if the delay could not be determined.
-	RetryDelay(attempt int, opErr error) (time.Duration, error)
-
-	// GetRetryToken attempts to deduct the retry cost from the retry token pool.
-	// Returning the token release function, or error.
-	GetRetryToken(ctx context.Context, opErr error) (releaseToken func(error) error, err error)
-
-	// GetInitialToken returns the initial attempt token that can increment the
-	// retry token pool if the attempt is successful.
-	GetInitialToken() (releaseToken func(error) error)
-}
-
-// RetryerV2 is an interface to determine if a given error from an attempt
-// should be retried, and if so what backoff delay to apply. The default
-// implementation used by most services is the retry package's Standard type.
-// Which contains basic retry logic using exponential backoff.
-//
-// RetryerV2 replaces the Retryer interface, deprecating the GetInitialToken
-// method in favor of GetAttemptToken which takes a context, and can return an error.
-//
-// The SDK's retry package's Attempt middleware, and utilities will always
-// wrap a Retryer as a RetryerV2. Delegating to GetInitialToken, only if
-// GetAttemptToken is not implemented.
-type RetryerV2 interface {
-	Retryer
-
-	// GetInitialToken returns the initial attempt token that can increment the
-	// retry token pool if the attempt is successful.
-	//
-	// Deprecated: This method does not provide a way to block using Context,
-	// nor can it return an error. Use RetryerV2, and GetAttemptToken instead.
-	GetInitialToken() (releaseToken func(error) error)
-
-	// GetAttemptToken returns the send token that can be used to rate limit
-	// attempt calls. Will be used by the SDK's retry package's Attempt
-	// middleware to get a send token prior to making the attempt, and to
-	// release the send token after the attempt has been made.
-	GetAttemptToken(context.Context) (func(error) error, error)
-}
-
-// NopRetryer provides a RequestRetryDecider implementation that will flag
-// all attempt errors as not retryable, with a max attempts of 1.
-type NopRetryer struct{}
-
-// IsErrorRetryable returns false for all error values.
-func (NopRetryer) IsErrorRetryable(error) bool { return false }
-
-// MaxAttempts always returns 1 for the original attempt.
-func (NopRetryer) MaxAttempts() int { return 1 }
-
-// RetryDelay is not valid for the NopRetryer. Will always return error.
-func (NopRetryer) RetryDelay(int, error) (time.Duration, error) {
-	return 0, fmt.Errorf("not retrying any attempt errors")
-}
-
-// GetRetryToken returns a stub function that does nothing.
-func (NopRetryer) GetRetryToken(context.Context, error) (func(error) error, error) {
-	return nopReleaseToken, nil
-}
-
-// GetInitialToken returns a stub function that does nothing.
-func (NopRetryer) GetInitialToken() func(error) error {
-	return nopReleaseToken
-}
-
-// GetAttemptToken returns a stub function that does nothing.
-func (NopRetryer) GetAttemptToken(context.Context) (func(error) error, error) {
-	return nopReleaseToken, nil
-}
-
-func nopReleaseToken(error) error { return nil }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go
deleted file mode 100644
index 3af9b2b336..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package aws
-
-// ExecutionEnvironmentID is the AWS execution environment runtime identifier.
-type ExecutionEnvironmentID string
-
-// RuntimeEnvironment is a collection of values that are determined at runtime
-// based on the environment that the SDK is executing in.
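To opt out of retries entirely, the NopRetryer defined above can be set per client; a sketch assuming an S3 client:

    import (
        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func newFailFastClient(cfg aws.Config) *s3.Client {
        return s3.NewFromConfig(cfg, func(o *s3.Options) {
            o.Retryer = aws.NopRetryer{} // single attempt, no retries
        })
    }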
Some of these values -// may or may not be present based on the executing environment and certain SDK -// configuration properties that drive whether these values are populated.. -type RuntimeEnvironment struct { - EnvironmentIdentifier ExecutionEnvironmentID - Region string - EC2InstanceMetadataRegion string -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go deleted file mode 100644 index cbf22f1d0b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go +++ /dev/null @@ -1,115 +0,0 @@ -package v4 - -import ( - "strings" - "sync" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -func lookupKey(service, region string) string { - var s strings.Builder - s.Grow(len(region) + len(service) + 3) - s.WriteString(region) - s.WriteRune('/') - s.WriteString(service) - return s.String() -} - -type derivedKey struct { - AccessKey string - Date time.Time - Credential []byte -} - -type derivedKeyCache struct { - values map[string]derivedKey - mutex sync.RWMutex -} - -func newDerivedKeyCache() derivedKeyCache { - return derivedKeyCache{ - values: make(map[string]derivedKey), - } -} - -func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte { - key := lookupKey(service, region) - s.mutex.RLock() - if cred, ok := s.get(key, credentials, signingTime.Time); ok { - s.mutex.RUnlock() - return cred - } - s.mutex.RUnlock() - - s.mutex.Lock() - if cred, ok := s.get(key, credentials, signingTime.Time); ok { - s.mutex.Unlock() - return cred - } - cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime) - entry := derivedKey{ - AccessKey: credentials.AccessKeyID, - Date: signingTime.Time, - Credential: cred, - } - s.values[key] = entry - s.mutex.Unlock() - - return cred -} - -func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) { - cacheEntry, ok := s.retrieveFromCache(key) - if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) { - return cacheEntry.Credential, true - } - return nil, false -} - -func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) { - if v, ok := s.values[key]; ok { - return v, true - } - return derivedKey{}, false -} - -// SigningKeyDeriver derives a signing key from a set of credentials -type SigningKeyDeriver struct { - cache derivedKeyCache -} - -// NewSigningKeyDeriver returns a new SigningKeyDeriver -func NewSigningKeyDeriver() *SigningKeyDeriver { - return &SigningKeyDeriver{ - cache: newDerivedKeyCache(), - } -} - -// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing. 
-func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte { - return k.cache.Get(credential, service, region, signingTime) -} - -func deriveKey(secret, service, region string, t SigningTime) []byte { - hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat())) - hmacRegion := HMACSHA256(hmacDate, []byte(region)) - hmacService := HMACSHA256(hmacRegion, []byte(service)) - return HMACSHA256(hmacService, []byte("aws4_request")) -} - -func isSameDay(x, y time.Time) bool { - xYear, xMonth, xDay := x.Date() - yYear, yMonth, yDay := y.Date() - - if xYear != yYear { - return false - } - - if xMonth != yMonth { - return false - } - - return xDay == yDay -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go deleted file mode 100644 index a23cb003bf..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go +++ /dev/null @@ -1,40 +0,0 @@ -package v4 - -// Signature Version 4 (SigV4) Constants -const ( - // EmptyStringSHA256 is the hex encoded sha256 value of an empty string - EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` - - // UnsignedPayload indicates that the request payload body is unsigned - UnsignedPayload = "UNSIGNED-PAYLOAD" - - // AmzAlgorithmKey indicates the signing algorithm - AmzAlgorithmKey = "X-Amz-Algorithm" - - // AmzSecurityTokenKey indicates the security token to be used with temporary credentials - AmzSecurityTokenKey = "X-Amz-Security-Token" - - // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z' - AmzDateKey = "X-Amz-Date" - - // AmzCredentialKey is the access key ID and credential scope - AmzCredentialKey = "X-Amz-Credential" - - // AmzSignedHeadersKey is the set of headers signed for the request - AmzSignedHeadersKey = "X-Amz-SignedHeaders" - - // AmzSignatureKey is the query parameter to store the SigV4 signature - AmzSignatureKey = "X-Amz-Signature" - - // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter - TimeFormat = "20060102T150405Z" - - // ShortTimeFormat is the shorten time format used in the credential scope - ShortTimeFormat = "20060102" - - // ContentSHAKey is the SHA256 of request body - ContentSHAKey = "X-Amz-Content-Sha256" - - // StreamingEventsPayload indicates that the request payload body is a signed event stream. 
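The derivation chain above is the standard SigV4 sequence; a self-contained sketch of the same computation outside this internal package:

    import (
        "crypto/hmac"
        "crypto/sha256"
    )

    func hmacSHA256(key, data []byte) []byte {
        h := hmac.New(sha256.New, key)
        h.Write(data)
        return h.Sum(nil)
    }

    // signingKey mirrors deriveKey above: chained HMACs over the short date
    // (format 20060102), region, service, and the literal "aws4_request".
    func signingKey(secret, shortDate, region, service string) []byte {
        k := hmacSHA256([]byte("AWS4"+secret), []byte(shortDate))
        k = hmacSHA256(k, []byte(region))
        k = hmacSHA256(k, []byte(service))
        return hmacSHA256(k, []byte("aws4_request"))
    }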
-	StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS"
-)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
deleted file mode 100644
index c61955ad5b..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package v4
-
-import (
-	sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings"
-)
-
-// Rules houses a set of Rule needed for validation of a
-// string value
-type Rules []Rule
-
-// Rule interface allows for more flexible rules and simply
-// checks whether or not a value adheres to that Rule
-type Rule interface {
-	IsValid(value string) bool
-}
-
-// IsValid will iterate through all rules and see if any rules
-// apply to the value and supports nested rules
-func (r Rules) IsValid(value string) bool {
-	for _, rule := range r {
-		if rule.IsValid(value) {
-			return true
-		}
-	}
-	return false
-}
-
-// MapRule generic Rule for maps
-type MapRule map[string]struct{}
-
-// IsValid for the map Rule satisfies whether it exists in the map
-func (m MapRule) IsValid(value string) bool {
-	_, ok := m[value]
-	return ok
-}
-
-// AllowList is a generic Rule for include listing
-type AllowList struct {
-	Rule
-}
-
-// IsValid for AllowList checks if the value is within the AllowList
-func (w AllowList) IsValid(value string) bool {
-	return w.Rule.IsValid(value)
-}
-
-// ExcludeList is a generic Rule for exclude listing
-type ExcludeList struct {
-	Rule
-}
-
-// IsValid for ExcludeList checks if the value is not within the ExcludeList
-func (b ExcludeList) IsValid(value string) bool {
-	return !b.Rule.IsValid(value)
-}
-
-// Patterns is a list of strings to match against
-type Patterns []string
-
-// IsValid for Patterns checks each pattern and returns if a match has
-// been found
-func (p Patterns) IsValid(value string) bool {
-	for _, pattern := range p {
-		if sdkstrings.HasPrefixFold(value, pattern) {
-			return true
-		}
-	}
-	return false
-}
-
-// InclusiveRules allows rules to depend on one another
-type InclusiveRules []Rule
-
-// IsValid will return true if all rules are true
-func (r InclusiveRules) IsValid(value string) bool {
-	for _, rule := range r {
-		if !rule.IsValid(value) {
-			return false
-		}
-	}
-	return true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
deleted file mode 100644
index ca738f234b..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package v4
-
-// IgnoredHeaders is a list of headers that are ignored during signing
-var IgnoredHeaders = Rules{
-	ExcludeList{
-		MapRule{
-			"Authorization":   struct{}{},
-			"User-Agent":      struct{}{},
-			"X-Amzn-Trace-Id": struct{}{},
-			"Expect":          struct{}{},
-		},
-	},
-}
-
-// RequiredSignedHeaders is an allow list for Build canonical headers.
-var RequiredSignedHeaders = Rules{ - AllowList{ - MapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Expected-Bucket-Owner": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Context": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - "X-Amz-Content-Sha256": struct{}{}, - "X-Amz-Tagging": struct{}{}, - }, - }, - Patterns{"X-Amz-Object-Lock-"}, - Patterns{"X-Amz-Meta-"}, -} - -// AllowedQueryHoisting is a allowed list for Build query headers. The boolean value -// represents whether or not it is a pattern. -var AllowedQueryHoisting = InclusiveRules{ - ExcludeList{RequiredSignedHeaders}, - Patterns{"X-Amz-"}, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go deleted file mode 100644 index e7fa7a1b1e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go +++ /dev/null @@ -1,13 +0,0 @@ -package v4 - -import ( - "crypto/hmac" - "crypto/sha256" -) - -// HMACSHA256 computes a HMAC-SHA256 of data given the provided key. -func HMACSHA256(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go deleted file mode 100644 index bf93659a43..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go +++ /dev/null @@ -1,75 +0,0 @@ -package v4 - -import ( - "net/http" - "strings" -) - -// SanitizeHostForHeader removes default port from host and updates request.Host -func SanitizeHostForHeader(r *http.Request) { - host := getHost(r) - port := portOnly(host) - if port != "" && isDefaultPort(r.URL.Scheme, port) { - r.Host = stripPort(host) - } -} - -// Returns host from request -func getHost(r *http.Request) string { - if r.Host != "" { - return r.Host - } - - return r.URL.Host -} - -// Hostname returns u.Host, without any port number. 
-// -// If Host is an IPv6 literal with a port number, Hostname returns the -// IPv6 literal without the square brackets. IPv6 literals may include -// a zone identifier. -// -// Copied from the Go 1.8 standard library (net/url) -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} - -// Port returns the port part of u.Host, without the leading colon. -// If u.Host doesn't contain a port, Port returns an empty string. -// -// Copied from the Go 1.8 standard library (net/url) -func portOnly(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return "" - } - if i := strings.Index(hostport, "]:"); i != -1 { - return hostport[i+len("]:"):] - } - if strings.Contains(hostport, "]") { - return "" - } - return hostport[colon+len(":"):] -} - -// Returns true if the specified URI is using the standard port -// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) -func isDefaultPort(scheme, port string) bool { - if port == "" { - return true - } - - lowerCaseScheme := strings.ToLower(scheme) - if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go deleted file mode 100644 index fc7887909e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go +++ /dev/null @@ -1,13 +0,0 @@ -package v4 - -import "strings" - -// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope -func BuildCredentialScope(signingTime SigningTime, region, service string) string { - return strings.Join([]string{ - signingTime.ShortTimeFormat(), - region, - service, - "aws4_request", - }, "/") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go deleted file mode 100644 index 1de06a765d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go +++ /dev/null @@ -1,36 +0,0 @@ -package v4 - -import "time" - -// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing. -type SigningTime struct { - time.Time - timeFormat string - shortTimeFormat string -} - -// NewSigningTime creates a new SigningTime given a time.Time -func NewSigningTime(t time.Time) SigningTime { - return SigningTime{ - Time: t, - } -} - -// TimeFormat provides a time formatted in the X-Amz-Date format. -func (m *SigningTime) TimeFormat() string { - return m.format(&m.timeFormat, TimeFormat) -} - -// ShortTimeFormat provides a time formatted of 20060102. 
-func (m *SigningTime) ShortTimeFormat() string { - return m.format(&m.shortTimeFormat, ShortTimeFormat) -} - -func (m *SigningTime) format(target *string, format string) string { - if len(*target) > 0 { - return *target - } - v := m.Time.Format(format) - *target = v - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go deleted file mode 100644 index d025dbaa06..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go +++ /dev/null @@ -1,80 +0,0 @@ -package v4 - -import ( - "net/url" - "strings" -) - -const doubleSpace = " " - -// StripExcessSpaces will rewrite the passed in slice's string values to not -// contain multiple side-by-side spaces. -func StripExcessSpaces(str string) string { - var j, k, l, m, spaces int - // Trim trailing spaces - for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { - } - - // Trim leading spaces - for k = 0; k < j && str[k] == ' '; k++ { - } - str = str[k : j+1] - - // Strip multiple spaces. - j = strings.Index(str, doubleSpace) - if j < 0 { - return str - } - - buf := []byte(str) - for k, m, l = j, j, len(buf); k < l; k++ { - if buf[k] == ' ' { - if spaces == 0 { - // First space. - buf[m] = buf[k] - m++ - } - spaces++ - } else { - // End of multiple spaces. - spaces = 0 - buf[m] = buf[k] - m++ - } - } - - return string(buf[:m]) -} - -// GetURIPath returns the escaped URI component from the provided URL. -func GetURIPath(u *url.URL) string { - var uriPath string - - if len(u.Opaque) > 0 { - const schemeSep, pathSep, queryStart = "//", "/", "?" - - opaque := u.Opaque - // Cut off the query string if present. - if idx := strings.Index(opaque, queryStart); idx >= 0 { - opaque = opaque[:idx] - } - - // Cutout the scheme separator if present. - if strings.Index(opaque, schemeSep) == 0 { - opaque = opaque[len(schemeSep):] - } - - // capture URI path starting with first path separator. 
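A table-test sketch of the whitespace helper's expected behavior, as it might appear inside this internal package (illustrative, not part of the upstream test suite):

    import "testing"

    func TestStripExcessSpaces(t *testing.T) {
        cases := map[string]string{
            "  a   b  c ": "a b c",
            "nochange":    "nochange",
        }
        for in, want := range cases {
            if got := StripExcessSpaces(in); got != want {
                t.Errorf("StripExcessSpaces(%q) = %q, want %q", in, got, want)
            }
        }
    }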
- if idx := strings.Index(opaque, pathSep); idx >= 0 { - uriPath = opaque[idx:] - } - } else { - uriPath = u.EscapedPath() - } - - if len(uriPath) == 0 { - uriPath = "/" - } - - return uriPath -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go deleted file mode 100644 index febeb0482d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go +++ /dev/null @@ -1,443 +0,0 @@ -package v4 - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" - v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const computePayloadHashMiddlewareID = "ComputePayloadHash" - -// HashComputationError indicates an error occurred while computing the signing hash -type HashComputationError struct { - Err error -} - -// Error is the error message -func (e *HashComputationError) Error() string { - return fmt.Sprintf("failed to compute payload hash: %v", e.Err) -} - -// Unwrap returns the underlying error if one is set -func (e *HashComputationError) Unwrap() error { - return e.Err -} - -// SigningError indicates an error condition occurred while performing SigV4 signing -type SigningError struct { - Err error -} - -func (e *SigningError) Error() string { - return fmt.Sprintf("failed to sign request: %v", e.Err) -} - -// Unwrap returns the underlying error cause -func (e *SigningError) Unwrap() error { - return e.Err -} - -// UseDynamicPayloadSigningMiddleware swaps the compute payload sha256 middleware with a resolver middleware that -// switches between unsigned and signed payload based on TLS state for request. -// This middleware should not be used for AWS APIs that do not support unsigned payload signing auth. -// By default, SDK uses this middleware for known AWS APIs that support such TLS based auth selection . -// -// Usage example - -// S3 PutObject API allows unsigned payload signing auth usage when TLS is enabled, and uses this middleware to -// dynamically switch between unsigned and signed payload based on TLS state for request. -func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{}) - return err -} - -// dynamicPayloadSigningMiddleware dynamically resolves the middleware that computes and set payload sha256 middleware. -type dynamicPayloadSigningMiddleware struct { -} - -// ID returns the resolver identifier -func (m *dynamicPayloadSigningMiddleware) ID() string { - return computePayloadHashMiddlewareID -} - -// HandleFinalize delegates SHA256 computation according to whether the request -// is TLS-enabled. 
-func (m *dynamicPayloadSigningMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if req.IsHTTPS() { - return (&UnsignedPayload{}).HandleFinalize(ctx, in, next) - } - return (&ComputePayloadSHA256{}).HandleFinalize(ctx, in, next) -} - -// UnsignedPayload sets the SigV4 request payload hash to unsigned. -// -// Will not set the Unsigned Payload magic SHA value, if a SHA has already been -// stored in the context. (e.g. application pre-computed SHA256 before making -// API call). -// -// This middleware does not check the X-Amz-Content-Sha256 header, if that -// header is serialized a middleware must translate it into the context. -type UnsignedPayload struct{} - -// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation -// middleware stack -func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -// ID returns the unsignedPayload identifier -func (m *UnsignedPayload) ID() string { - return computePayloadHashMiddlewareID -} - -// HandleFinalize sets the payload hash magic value to the unsigned sentinel. -func (m *UnsignedPayload) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if GetPayloadHash(ctx) == "" { - ctx = SetPayloadHash(ctx, v4Internal.UnsignedPayload) - } - return next.HandleFinalize(ctx, in) -} - -// ComputePayloadSHA256 computes SHA256 payload hash to sign. -// -// Will not set the Unsigned Payload magic SHA value, if a SHA has already been -// stored in the context. (e.g. application pre-computed SHA256 before making -// API call). -// -// This middleware does not check the X-Amz-Content-Sha256 header, if that -// header is serialized a middleware must translate it into the context. -type ComputePayloadSHA256 struct{} - -// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the -// operation middleware stack -func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - -// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the -// operation middleware stack -func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Remove(computePayloadHashMiddlewareID) - return err -} - -// ID is the middleware name -func (m *ComputePayloadSHA256) ID() string { - return computePayloadHashMiddlewareID -} - -// HandleFinalize computes the payload hash for the request, storing it to the -// context. This is a no-op if a caller has previously set that value. 
-func (m *ComputePayloadSHA256) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if GetPayloadHash(ctx) != "" { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &HashComputationError{ - Err: fmt.Errorf("unexpected request middleware type %T", in.Request), - } - } - - hash := sha256.New() - if stream := req.GetStream(); stream != nil { - _, err = io.Copy(hash, stream) - if err != nil { - return out, metadata, &HashComputationError{ - Err: fmt.Errorf("failed to compute payload hash, %w", err), - } - } - - if err := req.RewindStream(); err != nil { - return out, metadata, &HashComputationError{ - Err: fmt.Errorf("failed to seek body to start, %w", err), - } - } - } - - ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil))) - - return next.HandleFinalize(ctx, in) -} - -// SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the -// ComputePayloadSHA256 middleware with the UnsignedPayload middleware. -// -// Use this to disable computing the Payload SHA256 checksum and instead use -// UNSIGNED-PAYLOAD for the SHA256 value. -func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &UnsignedPayload{}) - return err -} - -// ContentSHA256Header sets the X-Amz-Content-Sha256 header value to -// the Payload hash stored in the context. -type ContentSHA256Header struct{} - -// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the -// operation middleware stack -func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&ContentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) -} - -// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware -// from the operation middleware stack -func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Remove((*ContentSHA256Header)(nil).ID()) - return err -} - -// ID returns the ContentSHA256HeaderMiddleware identifier -func (m *ContentSHA256Header) ID() string { - return "SigV4ContentSHA256Header" -} - -// HandleFinalize sets the X-Amz-Content-Sha256 header value to the Payload hash -// stored in the context. -func (m *ContentSHA256Header) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &HashComputationError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} - } - - req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx)) - return next.HandleFinalize(ctx, in) -} - -// SignHTTPRequestMiddlewareOptions is the configuration options for -// [SignHTTPRequestMiddleware]. -// -// Deprecated: [SignHTTPRequestMiddleware] is deprecated. -type SignHTTPRequestMiddlewareOptions struct { - CredentialsProvider aws.CredentialsProvider - Signer HTTPSigner - LogSigning bool -} - -// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 -// HTTP Signing. -// -// Deprecated: AWS service clients no longer use this middleware. 
Signing as an -// SDK operation is now performed through an internal per-service middleware -// which opaquely selects and uses the signer from the resolved auth scheme. -type SignHTTPRequestMiddleware struct { - credentialsProvider aws.CredentialsProvider - signer HTTPSigner - logSigning bool -} - -// NewSignHTTPRequestMiddleware constructs a [SignHTTPRequestMiddleware] using -// the given [Signer] for signing requests. -// -// Deprecated: SignHTTPRequestMiddleware is deprecated. -func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { - return &SignHTTPRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - signer: options.Signer, - logSigning: options.LogSigning, - } -} - -// ID is the SignHTTPRequestMiddleware identifier. -// -// Deprecated: SignHTTPRequestMiddleware is deprecated. -func (s *SignHTTPRequestMiddleware) ID() string { - return "Signing" -} - -// HandleFinalize will take the provided input and sign the request using the -// SigV4 authentication scheme. -// -// Deprecated: SignHTTPRequestMiddleware is deprecated. -func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if !haveCredentialProvider(s.credentialsProvider) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} - } - - signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx) - payloadHash := GetPayloadHash(ctx) - if len(payloadHash) == 0 { - return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} - } - - mctx := metrics.Context(ctx) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.CredentialFetchStartTime = sdk.NowTime() - } - } - - credentials, err := s.credentialsProvider.Retrieve(ctx) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.CredentialFetchEndTime = sdk.NowTime() - } - } - - if err != nil { - return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} - } - - signerOptions := []func(o *SignerOptions){ - func(o *SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }, - } - - // existing DisableURIPathEscaping is equivalent in purpose - // to authentication scheme property DisableDoubleEncoding - disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx) - if overridden { - signerOptions = append(signerOptions, func(o *SignerOptions) { - o.DisableURIPathEscaping = disableDoubleEncoding - }) - } - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.SignStartTime = sdk.NowTime() - } - } - - err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...) 
- - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.SignEndTime = sdk.NowTime() - } - } - - if err != nil { - return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} - } - - ctx = awsmiddleware.SetSigningCredentials(ctx, credentials) - - return next.HandleFinalize(ctx, in) -} - -// StreamingEventsPayload signs input event stream messages. -type StreamingEventsPayload struct{} - -// AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack. -func AddStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&StreamingEventsPayload{}, middleware.Before) -} - -// ID identifies the middleware. -func (s *StreamingEventsPayload) ID() string { - return computePayloadHashMiddlewareID -} - -// HandleFinalize marks the input stream to be signed with SigV4. -func (s *StreamingEventsPayload) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - contentSHA := GetPayloadHash(ctx) - if len(contentSHA) == 0 { - contentSHA = v4Internal.StreamingEventsPayload - } - - ctx = SetPayloadHash(ctx, contentSHA) - - return next.HandleFinalize(ctx, in) -} - -// GetSignedRequestSignature attempts to extract the signature of the request. -// Returning an error if the request is unsigned, or unable to extract the -// signature. -func GetSignedRequestSignature(r *http.Request) ([]byte, error) { - const authHeaderSignatureElem = "Signature=" - - if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { - ps := strings.Split(auth, ", ") - for _, p := range ps { - if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { - sig := p[len(authHeaderSignatureElem):] - if len(sig) == 0 { - return nil, fmt.Errorf("invalid request signature authorization header") - } - return hex.DecodeString(sig) - } - } - } - - if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { - return hex.DecodeString(sig) - } - - return nil, fmt.Errorf("request not signed") -} - -func haveCredentialProvider(p aws.CredentialsProvider) bool { - if p == nil { - return false - } - - return !aws.IsCredentialsProvider(p, (*aws.AnonymousCredentials)(nil)) -} - -type payloadHashKey struct{} - -// GetPayloadHash retrieves the payload hash to use for signing -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetPayloadHash(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, payloadHashKey{}).(string) - return v -} - -// SetPayloadHash sets the payload hash to be used for signing the request -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. 
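Because GetPayloadHash/SetPayloadHash are ordinary stack-value accessors, a caller can seed a precomputed digest before the compute step runs and avoid re-hashing a large body. A hedged sketch of such a finalize middleware (the type name, field, and placement are assumptions for illustration, not SDK code):

    // withPrecomputedSHA seeds an already-known hex SHA-256 into the stack
    // so ComputePayloadSHA256 sees a non-empty hash and passes through.
    type withPrecomputedSHA struct{ hexHash string }

    func (m *withPrecomputedSHA) ID() string { return "WithPrecomputedSHA" }

    func (m *withPrecomputedSHA) HandleFinalize(
        ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
    ) (middleware.FinalizeOutput, middleware.Metadata, error) {
        return next.HandleFinalize(v4.SetPayloadHash(ctx, m.hexHash), in)
    }

Inserted before the compute step (e.g. stack.Finalize.Insert(&withPrecomputedSHA{hexHash: h}, "ComputePayloadHash", middleware.Before)), the early-return check in ComputePayloadSHA256.HandleFinalize then skips hashing entirely.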
-func SetPayloadHash(ctx context.Context, hash string) context.Context { - return middleware.WithStackValue(ctx, payloadHashKey{}, hash) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go deleted file mode 100644 index e1a0665124..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go +++ /dev/null @@ -1,127 +0,0 @@ -package v4 - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/middleware" - smithyHTTP "github.com/aws/smithy-go/transport/http" -) - -// HTTPPresigner is an interface to a SigV4 signer that can sign create a -// presigned URL for a HTTP requests. -type HTTPPresigner interface { - PresignHTTP( - ctx context.Context, credentials aws.Credentials, r *http.Request, - payloadHash string, service string, region string, signingTime time.Time, - optFns ...func(*SignerOptions), - ) (url string, signedHeader http.Header, err error) -} - -// PresignedHTTPRequest provides the URL and signed headers that are included -// in the presigned URL. -type PresignedHTTPRequest struct { - URL string - Method string - SignedHeader http.Header -} - -// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. -type PresignHTTPRequestMiddlewareOptions struct { - CredentialsProvider aws.CredentialsProvider - Presigner HTTPPresigner - LogSigning bool -} - -// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a -// presigned URL for an HTTP request. -// -// Will short circuit the middleware stack and not forward onto the next -// Finalize handler. -type PresignHTTPRequestMiddleware struct { - credentialsProvider aws.CredentialsProvider - presigner HTTPPresigner - logSigning bool -} - -// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware -// initialized with the presigner. -func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { - return &PresignHTTPRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - presigner: options.Presigner, - logSigning: options.LogSigning, - } -} - -// ID provides the middleware ID. -func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" } - -// HandleFinalize will take the provided input and create a presigned url for -// the http request using the SigV4 presign authentication scheme. 
-// -// Since the signed request is not a valid HTTP request -func (s *PresignHTTPRequestMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyHTTP.Request) - if !ok { - return out, metadata, &SigningError{ - Err: fmt.Errorf("unexpected request middleware type %T", in.Request), - } - } - - httpReq := req.Build(ctx) - if !haveCredentialProvider(s.credentialsProvider) { - out.Result = &PresignedHTTPRequest{ - URL: httpReq.URL.String(), - Method: httpReq.Method, - SignedHeader: http.Header{}, - } - - return out, metadata, nil - } - - signingName := awsmiddleware.GetSigningName(ctx) - signingRegion := awsmiddleware.GetSigningRegion(ctx) - payloadHash := GetPayloadHash(ctx) - if len(payloadHash) == 0 { - return out, metadata, &SigningError{ - Err: fmt.Errorf("computed payload hash missing from context"), - } - } - - credentials, err := s.credentialsProvider.Retrieve(ctx) - if err != nil { - return out, metadata, &SigningError{ - Err: fmt.Errorf("failed to retrieve credentials: %w", err), - } - } - - u, h, err := s.presigner.PresignHTTP(ctx, credentials, - httpReq, payloadHash, signingName, signingRegion, sdk.NowTime(), - func(o *SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }) - if err != nil { - return out, metadata, &SigningError{ - Err: fmt.Errorf("failed to sign http request, %w", err), - } - } - - out.Result = &PresignedHTTPRequest{ - URL: u, - Method: httpReq.Method, - SignedHeader: h, - } - - return out, metadata, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go deleted file mode 100644 index 66aa2bd6ab..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go +++ /dev/null @@ -1,86 +0,0 @@ -package v4 - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "github.com/aws/aws-sdk-go-v2/aws" - v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" - "strings" - "time" -) - -// EventStreamSigner is an AWS EventStream protocol signer. -type EventStreamSigner interface { - GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) -} - -// StreamSignerOptions is the configuration options for StreamSigner. -type StreamSignerOptions struct{} - -// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads. -type StreamSigner struct { - options StreamSignerOptions - - credentials aws.Credentials - service string - region string - - prevSignature []byte - - signingKeyDeriver *v4Internal.SigningKeyDeriver -} - -// NewStreamSigner returns a new AWS EventStream protocol signer. -func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner { - o := StreamSignerOptions{} - - for _, fn := range optFns { - fn(&o) - } - - return &StreamSigner{ - options: o, - credentials: credentials, - service: service, - region: region, - signingKeyDeriver: v4Internal.NewSigningKeyDeriver(), - prevSignature: seedSignature, - } -} - -// GetSignature signs the provided header and payload bytes. 
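The stream signer keeps a rolling previous signature, so a single instance must be reused across messages in order. A minimal usage sketch; creds, seedSignature (the initiating request's signature), headerBytes, and payloadBytes are assumed inputs, and the service/region values are illustrative:

    // Sign successive event-stream messages; each call chains off the
    // previous signature held inside the StreamSigner.
    signer := v4.NewStreamSigner(creds, "transcribe", "us-east-1", seedSignature)
    sig, err := signer.GetSignature(ctx, headerBytes, payloadBytes, time.Now().UTC())
    if err != nil {
        return err
    }
    _ = sig // attach to the next event-stream message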
-func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) { - options := s.options - - for _, fn := range optFns { - fn(&options) - } - - prevSignature := s.prevSignature - - st := v4Internal.NewSigningTime(signingTime) - - sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st) - - scope := v4Internal.BuildCredentialScope(st, s.region, s.service) - - stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st) - - signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign)) - s.prevSignature = signature - - return signature, nil -} - -func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string { - hash := sha256.New() - return strings.Join([]string{ - "AWS4-HMAC-SHA256-PAYLOAD", - signingTime.TimeFormat(), - credentialScope, - hex.EncodeToString(previousSignature), - hex.EncodeToString(makeHash(hash, headers)), - hex.EncodeToString(makeHash(hash, payload)), - }, "\n") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go deleted file mode 100644 index bb61904e1d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go +++ /dev/null @@ -1,560 +0,0 @@ -// Package v4 implements signing for AWS V4 signer -// -// Provides request signing for request that need to be signed with -// AWS V4 Signatures. -// -// # Standalone Signer -// -// Generally using the signer outside of the SDK should not require any additional -// -// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires -// -// additional escaping you many need to use the URL.Opaque to define what the raw URI should be sent -// to the service as. -// -// The signer will first check the URL.Opaque field, and use its value if set. -// The signer does require the URL.Opaque field to be set in the form of: -// -// "///" -// -// // e.g. -// "//example.com/some/path" -// -// The leading "//" and hostname are required or the URL.Opaque escaping will -// not work correctly. -// -// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() -// method and using the returned value. -// -// AWS v4 signature validation requires that the canonical string's URI path -// element must be the URI escaped form of the HTTP request's path. -// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -// -// The Go HTTP client will perform escaping automatically on the request. Some -// of these escaping may cause signature validation errors because the HTTP -// request differs from the URI path or query that the signature was generated. -// https://golang.org/pkg/net/url/#URL.EscapedPath -// -// Because of this, it is recommended that when using the signer outside of the -// SDK that explicitly escaping the request prior to being signed is preferable, -// and will help prevent signature validation errors. This can be done by setting -// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then -// call URL.EscapedPath() if Opaque is not set. -// -// Test `TestStandaloneSign` provides a complete example of using the signer -// outside of the SDK and pre-escaping the URI path. 
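Putting the escaping guidance above into practice, a hedged standalone-signing sketch: the credentials, service name, region, and URL are placeholders, and the empty-body hash constant is the one quoted in the SignHTTP docs below.

    // Sign a bodyless GET outside the SDK, pre-escaping the path via URL.Opaque.
    const emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

    req, _ := http.NewRequest(http.MethodGet, "https://example.com/some/path", nil)
    req.URL.Opaque = "//example.com/some/path" // raw, pre-escaped URI

    creds := aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}
    signer := v4.NewSigner()
    if err := signer.SignHTTP(context.TODO(), creds, req, emptySHA256,
        "execute-api", "us-east-1", time.Now().UTC()); err != nil {
        return err
    }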
-package v4 - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "fmt" - "hash" - "net/http" - "net/textproto" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/logging" -) - -const ( - signingAlgorithm = "AWS4-HMAC-SHA256" - authorizationHeader = "Authorization" - - // Version of signing v4 - Version = "SigV4" -) - -// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests -type HTTPSigner interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error -} - -type keyDerivator interface { - DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte -} - -// SignerOptions is the SigV4 Signer options. -type SignerOptions struct { - // Disables the Signer's moving HTTP header key/value pairs from the HTTP - // request header to the request's query string. This is most commonly used - // with pre-signed requests preventing headers from being added to the - // request's query string. - DisableHeaderHoisting bool - - // Disables the automatic escaping of the URI path of the request for the - // siganture's canonical string's path. For services that do not need additional - // escaping then use this to disable the signer escaping the path. - // - // S3 is an example of a service that does not need additional escaping. - // - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - DisableURIPathEscaping bool - - // The logger to send log messages to. - Logger logging.Logger - - // Enable logging of signed requests. - // This will enable logging of the canonical request, the string to sign, and for presigning the subsequent - // presigned URL. - LogSigning bool - - // Disables setting the session token on the request as part of signing - // through X-Amz-Security-Token. This is needed for variations of v4 that - // present the token elsewhere. - DisableSessionToken bool -} - -// Signer applies AWS v4 signing to given request. Use this to sign requests -// that need to be signed with AWS V4 Signatures. 
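The SignerOptions above are applied through functional options, either at construction or per call. A small sketch:

    // Construct a signer that logs its canonical request and string to sign,
    // and skips the extra URI escaping (the S3-style behavior described above).
    signer := v4.NewSigner(func(o *v4.SignerOptions) {
        o.LogSigning = true
        o.DisableURIPathEscaping = true
    })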
-type Signer struct { - options SignerOptions - keyDerivator keyDerivator -} - -// NewSigner returns a new SigV4 Signer -func NewSigner(optFns ...func(signer *SignerOptions)) *Signer { - options := SignerOptions{} - - for _, fn := range optFns { - fn(&options) - } - - return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()} -} - -type httpSigner struct { - Request *http.Request - ServiceName string - Region string - Time v4Internal.SigningTime - Credentials aws.Credentials - KeyDerivator keyDerivator - IsPreSign bool - - PayloadHash string - - DisableHeaderHoisting bool - DisableURIPathEscaping bool - DisableSessionToken bool -} - -func (s *httpSigner) Build() (signedRequest, error) { - req := s.Request - - query := req.URL.Query() - headers := req.Header - - s.setRequiredSigningFields(headers, query) - - // Sort Each Query Key's Values - for key := range query { - sort.Strings(query[key]) - } - - v4Internal.SanitizeHostForHeader(req) - - credentialScope := s.buildCredentialScope() - credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope - if s.IsPreSign { - query.Set(v4Internal.AmzCredentialKey, credentialStr) - } - - unsignedHeaders := headers - if s.IsPreSign && !s.DisableHeaderHoisting { - var urlValues url.Values - urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers) - for k := range urlValues { - query[k] = urlValues[k] - } - } - - host := req.URL.Host - if len(req.Host) > 0 { - host = req.Host - } - - signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) - - if s.IsPreSign { - query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr) - } - - var rawQuery strings.Builder - rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1)) - - canonicalURI := v4Internal.GetURIPath(req.URL) - if !s.DisableURIPathEscaping { - canonicalURI = httpbinding.EscapePath(canonicalURI, false) - } - - canonicalString := s.buildCanonicalString( - req.Method, - canonicalURI, - rawQuery.String(), - signedHeadersStr, - canonicalHeaderStr, - ) - - strToSign := s.buildStringToSign(credentialScope, canonicalString) - signingSignature, err := s.buildSignature(strToSign) - if err != nil { - return signedRequest{}, err - } - - if s.IsPreSign { - rawQuery.WriteString("&X-Amz-Signature=") - rawQuery.WriteString(signingSignature) - } else { - headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature)) - } - - req.URL.RawQuery = rawQuery.String() - - return signedRequest{ - Request: req, - SignedHeaders: signedHeaders, - CanonicalString: canonicalString, - StringToSign: strToSign, - PreSigned: s.IsPreSign, - }, nil -} - -func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string { - const credential = "Credential=" - const signedHeaders = "SignedHeaders=" - const signature = "Signature=" - const commaSpace = ", " - - var parts strings.Builder - parts.Grow(len(signingAlgorithm) + 1 + - len(credential) + len(credentialStr) + 2 + - len(signedHeaders) + len(signedHeadersStr) + 2 + - len(signature) + len(signingSignature), - ) - parts.WriteString(signingAlgorithm) - parts.WriteRune(' ') - parts.WriteString(credential) - parts.WriteString(credentialStr) - parts.WriteString(commaSpace) - parts.WriteString(signedHeaders) - parts.WriteString(signedHeadersStr) - parts.WriteString(commaSpace) - parts.WriteString(signature) - 
parts.WriteString(signingSignature) - return parts.String() -} - -// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the -// request is made to, and time the request is signed at. The signTime allows -// you to specify that a request is signed for the future, and cannot be -// used until then. -// -// The payloadHash is the hex encoded SHA-256 hash of the request payload, and -// must be provided. Even if the request has no payload (aka body). If the -// request has no payload you should use the hex encoded SHA-256 of an empty -// string as the payloadHash value. -// -// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -// -// Some services such as Amazon S3 accept alternative values for the payload -// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be -// included in the request signature. -// -// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html -// -// Sign differs from Presign in that it will sign the request using HTTP -// header values. This type of signing is intended for http.Request values that -// will not be shared, or are shared in a way the header values on the request -// will not be lost. -// -// The passed in request will be modified in place. -func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error { - options := s.options - - for _, fn := range optFns { - fn(&options) - } - - signer := &httpSigner{ - Request: r, - PayloadHash: payloadHash, - ServiceName: service, - Region: region, - Credentials: credentials, - Time: v4Internal.NewSigningTime(signingTime.UTC()), - DisableHeaderHoisting: options.DisableHeaderHoisting, - DisableURIPathEscaping: options.DisableURIPathEscaping, - DisableSessionToken: options.DisableSessionToken, - KeyDerivator: s.keyDerivator, - } - - signedRequest, err := signer.Build() - if err != nil { - return err - } - - logSigningInfo(ctx, options, &signedRequest, false) - - return nil -} - -// PresignHTTP signs AWS v4 requests with the payload hash, service name, region -// the request is made to, and time the request is signed at. The signTime -// allows you to specify that a request is signed for the future, and cannot -// be used until then. -// -// Returns the signed URL and the map of HTTP headers that were included in the -// signature or an error if signing the request failed. For presigned requests -// these headers and their values must be included on the HTTP request when it -// is made. This is helpful to know what header values need to be shared with -// the party the presigned request will be distributed to. -// -// The payloadHash is the hex encoded SHA-256 hash of the request payload, and -// must be provided. Even if the request has no payload (aka body). If the -// request has no payload you should use the hex encoded SHA-256 of an empty -// string as the payloadHash value. -// -// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -// -// Some services such as Amazon S3 accept alternative values for the payload -// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be -// included in the request signature. -// -// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html -// -// PresignHTTP differs from SignHTTP in that it will sign the request using -// query string instead of header values. 
This allows you to share the -// Presigned Request's URL with third parties, or distribute it throughout your -// system with minimal dependencies. -// -// PresignHTTP will not set the expires time of the presigned request -// automatically. To specify the expire duration for a request add the -// "X-Amz-Expires" query parameter on the request with the value as the -// duration in seconds the presigned URL should be considered valid for. This -// parameter is not used by all AWS services, and is most notable used by -// Amazon S3 APIs. -// -// expires := 20 * time.Minute -// query := req.URL.Query() -// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10)) -// req.URL.RawQuery = query.Encode() -// -// This method does not modify the provided request. -func (s *Signer) PresignHTTP( - ctx context.Context, credentials aws.Credentials, r *http.Request, - payloadHash string, service string, region string, signingTime time.Time, - optFns ...func(*SignerOptions), -) (signedURI string, signedHeaders http.Header, err error) { - options := s.options - - for _, fn := range optFns { - fn(&options) - } - - signer := &httpSigner{ - Request: r.Clone(r.Context()), - PayloadHash: payloadHash, - ServiceName: service, - Region: region, - Credentials: credentials, - Time: v4Internal.NewSigningTime(signingTime.UTC()), - IsPreSign: true, - DisableHeaderHoisting: options.DisableHeaderHoisting, - DisableURIPathEscaping: options.DisableURIPathEscaping, - DisableSessionToken: options.DisableSessionToken, - KeyDerivator: s.keyDerivator, - } - - signedRequest, err := signer.Build() - if err != nil { - return "", nil, err - } - - logSigningInfo(ctx, options, &signedRequest, true) - - signedHeaders = make(http.Header) - - // For the signed headers we canonicalize the header keys in the returned map. - // This avoids situations where can standard library double headers like host header. For example the standard - // library will set the Host header, even if it is present in lower-case form. - for k, v := range signedRequest.SignedHeaders { - key := textproto.CanonicalMIMEHeaderKey(k) - signedHeaders[key] = append(signedHeaders[key], v...) - } - - return signedRequest.Request.URL.String(), signedHeaders, nil -} - -func (s *httpSigner) buildCredentialScope() string { - return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName) -} - -func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} - -func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { - signed = make(http.Header) - - var headers []string - const hostHeader = "host" - headers = append(headers, hostHeader) - signed[hostHeader] = append(signed[hostHeader], host) - - const contentLengthHeader = "content-length" - if length > 0 { - headers = append(headers, contentLengthHeader) - signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10)) - } - - for k, v := range header { - if !rule.IsValid(k) { - continue // ignored header - } - if strings.EqualFold(k, contentLengthHeader) { - // prevent signing already handled content-length header. 
- continue - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := signed[lowerCaseKey]; ok { - // include additional values - signed[lowerCaseKey] = append(signed[lowerCaseKey], v...) - continue - } - - headers = append(headers, lowerCaseKey) - signed[lowerCaseKey] = v - } - sort.Strings(headers) - - signedHeaders = strings.Join(headers, ";") - - var canonicalHeaders strings.Builder - n := len(headers) - const colon = ':' - for i := 0; i < n; i++ { - if headers[i] == hostHeader { - canonicalHeaders.WriteString(hostHeader) - canonicalHeaders.WriteRune(colon) - canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host)) - } else { - canonicalHeaders.WriteString(headers[i]) - canonicalHeaders.WriteRune(colon) - // Trim out leading, trailing, and dedup inner spaces from signed header values. - values := signed[headers[i]] - for j, v := range values { - cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v)) - canonicalHeaders.WriteString(cleanedValue) - if j < len(values)-1 { - canonicalHeaders.WriteRune(',') - } - } - } - canonicalHeaders.WriteRune('\n') - } - canonicalHeadersStr = canonicalHeaders.String() - - return signed, signedHeaders, canonicalHeadersStr -} - -func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string { - return strings.Join([]string{ - method, - uri, - query, - canonicalHeaders, - signedHeaders, - s.PayloadHash, - }, "\n") -} - -func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string { - return strings.Join([]string{ - signingAlgorithm, - s.Time.TimeFormat(), - credentialScope, - hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))), - }, "\n") -} - -func makeHash(hash hash.Hash, b []byte) []byte { - hash.Reset() - hash.Write(b) - return hash.Sum(nil) -} - -func (s *httpSigner) buildSignature(strToSign string) (string, error) { - key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time) - return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil -} - -func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { - amzDate := s.Time.TimeFormat() - - if s.IsPreSign { - query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm) - sessionToken := s.Credentials.SessionToken - if !s.DisableSessionToken && len(sessionToken) > 0 { - query.Set("X-Amz-Security-Token", sessionToken) - } - - query.Set(v4Internal.AmzDateKey, amzDate) - return - } - - headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate) - - if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 { - headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken) - } -} - -func logSigningInfo(ctx context.Context, options SignerOptions, request *signedRequest, isPresign bool) { - if !options.LogSigning { - return - } - signedURLMsg := "" - if isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String()) - } - logger := logging.WithContext(ctx, options.Logger) - logger.Logf(logging.Debug, logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg) -} - -type signedRequest struct { - Request *http.Request - SignedHeaders http.Header - CanonicalString string - StringToSign string - PreSigned bool -} - -const logSignInfoMsg = `Request Signature: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s 
------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go deleted file mode 100644 index f3fc4d610d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by aws/generate.go DO NOT EDIT. - -package aws - -import ( - "github.com/aws/smithy-go/ptr" - "time" -) - -// Bool returns a pointer value for the bool value passed in. -func Bool(v bool) *bool { - return ptr.Bool(v) -} - -// BoolSlice returns a slice of bool pointers from the values -// passed in. -func BoolSlice(vs []bool) []*bool { - return ptr.BoolSlice(vs) -} - -// BoolMap returns a map of bool pointers from the values -// passed in. -func BoolMap(vs map[string]bool) map[string]*bool { - return ptr.BoolMap(vs) -} - -// Byte returns a pointer value for the byte value passed in. -func Byte(v byte) *byte { - return ptr.Byte(v) -} - -// ByteSlice returns a slice of byte pointers from the values -// passed in. -func ByteSlice(vs []byte) []*byte { - return ptr.ByteSlice(vs) -} - -// ByteMap returns a map of byte pointers from the values -// passed in. -func ByteMap(vs map[string]byte) map[string]*byte { - return ptr.ByteMap(vs) -} - -// String returns a pointer value for the string value passed in. -func String(v string) *string { - return ptr.String(v) -} - -// StringSlice returns a slice of string pointers from the values -// passed in. -func StringSlice(vs []string) []*string { - return ptr.StringSlice(vs) -} - -// StringMap returns a map of string pointers from the values -// passed in. -func StringMap(vs map[string]string) map[string]*string { - return ptr.StringMap(vs) -} - -// Int returns a pointer value for the int value passed in. -func Int(v int) *int { - return ptr.Int(v) -} - -// IntSlice returns a slice of int pointers from the values -// passed in. -func IntSlice(vs []int) []*int { - return ptr.IntSlice(vs) -} - -// IntMap returns a map of int pointers from the values -// passed in. -func IntMap(vs map[string]int) map[string]*int { - return ptr.IntMap(vs) -} - -// Int8 returns a pointer value for the int8 value passed in. -func Int8(v int8) *int8 { - return ptr.Int8(v) -} - -// Int8Slice returns a slice of int8 pointers from the values -// passed in. -func Int8Slice(vs []int8) []*int8 { - return ptr.Int8Slice(vs) -} - -// Int8Map returns a map of int8 pointers from the values -// passed in. -func Int8Map(vs map[string]int8) map[string]*int8 { - return ptr.Int8Map(vs) -} - -// Int16 returns a pointer value for the int16 value passed in. -func Int16(v int16) *int16 { - return ptr.Int16(v) -} - -// Int16Slice returns a slice of int16 pointers from the values -// passed in. -func Int16Slice(vs []int16) []*int16 { - return ptr.Int16Slice(vs) -} - -// Int16Map returns a map of int16 pointers from the values -// passed in. -func Int16Map(vs map[string]int16) map[string]*int16 { - return ptr.Int16Map(vs) -} - -// Int32 returns a pointer value for the int32 value passed in. -func Int32(v int32) *int32 { - return ptr.Int32(v) -} - -// Int32Slice returns a slice of int32 pointers from the values -// passed in. -func Int32Slice(vs []int32) []*int32 { - return ptr.Int32Slice(vs) -} - -// Int32Map returns a map of int32 pointers from the values -// passed in. 
-func Int32Map(vs map[string]int32) map[string]*int32 { - return ptr.Int32Map(vs) -} - -// Int64 returns a pointer value for the int64 value passed in. -func Int64(v int64) *int64 { - return ptr.Int64(v) -} - -// Int64Slice returns a slice of int64 pointers from the values -// passed in. -func Int64Slice(vs []int64) []*int64 { - return ptr.Int64Slice(vs) -} - -// Int64Map returns a map of int64 pointers from the values -// passed in. -func Int64Map(vs map[string]int64) map[string]*int64 { - return ptr.Int64Map(vs) -} - -// Uint returns a pointer value for the uint value passed in. -func Uint(v uint) *uint { - return ptr.Uint(v) -} - -// UintSlice returns a slice of uint pointers from the values -// passed in. -func UintSlice(vs []uint) []*uint { - return ptr.UintSlice(vs) -} - -// UintMap returns a map of uint pointers from the values -// passed in. -func UintMap(vs map[string]uint) map[string]*uint { - return ptr.UintMap(vs) -} - -// Uint8 returns a pointer value for the uint8 value passed in. -func Uint8(v uint8) *uint8 { - return ptr.Uint8(v) -} - -// Uint8Slice returns a slice of uint8 pointers from the values -// passed in. -func Uint8Slice(vs []uint8) []*uint8 { - return ptr.Uint8Slice(vs) -} - -// Uint8Map returns a map of uint8 pointers from the values -// passed in. -func Uint8Map(vs map[string]uint8) map[string]*uint8 { - return ptr.Uint8Map(vs) -} - -// Uint16 returns a pointer value for the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return ptr.Uint16(v) -} - -// Uint16Slice returns a slice of uint16 pointers from the values -// passed in. -func Uint16Slice(vs []uint16) []*uint16 { - return ptr.Uint16Slice(vs) -} - -// Uint16Map returns a map of uint16 pointers from the values -// passed in. -func Uint16Map(vs map[string]uint16) map[string]*uint16 { - return ptr.Uint16Map(vs) -} - -// Uint32 returns a pointer value for the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return ptr.Uint32(v) -} - -// Uint32Slice returns a slice of uint32 pointers from the values -// passed in. -func Uint32Slice(vs []uint32) []*uint32 { - return ptr.Uint32Slice(vs) -} - -// Uint32Map returns a map of uint32 pointers from the values -// passed in. -func Uint32Map(vs map[string]uint32) map[string]*uint32 { - return ptr.Uint32Map(vs) -} - -// Uint64 returns a pointer value for the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return ptr.Uint64(v) -} - -// Uint64Slice returns a slice of uint64 pointers from the values -// passed in. -func Uint64Slice(vs []uint64) []*uint64 { - return ptr.Uint64Slice(vs) -} - -// Uint64Map returns a map of uint64 pointers from the values -// passed in. -func Uint64Map(vs map[string]uint64) map[string]*uint64 { - return ptr.Uint64Map(vs) -} - -// Float32 returns a pointer value for the float32 value passed in. -func Float32(v float32) *float32 { - return ptr.Float32(v) -} - -// Float32Slice returns a slice of float32 pointers from the values -// passed in. -func Float32Slice(vs []float32) []*float32 { - return ptr.Float32Slice(vs) -} - -// Float32Map returns a map of float32 pointers from the values -// passed in. -func Float32Map(vs map[string]float32) map[string]*float32 { - return ptr.Float32Map(vs) -} - -// Float64 returns a pointer value for the float64 value passed in. -func Float64(v float64) *float64 { - return ptr.Float64(v) -} - -// Float64Slice returns a slice of float64 pointers from the values -// passed in. 
-func Float64Slice(vs []float64) []*float64 { - return ptr.Float64Slice(vs) -} - -// Float64Map returns a map of float64 pointers from the values -// passed in. -func Float64Map(vs map[string]float64) map[string]*float64 { - return ptr.Float64Map(vs) -} - -// Time returns a pointer value for the time.Time value passed in. -func Time(v time.Time) *time.Time { - return ptr.Time(v) -} - -// TimeSlice returns a slice of time.Time pointers from the values -// passed in. -func TimeSlice(vs []time.Time) []*time.Time { - return ptr.TimeSlice(vs) -} - -// TimeMap returns a map of time.Time pointers from the values -// passed in. -func TimeMap(vs map[string]time.Time) map[string]*time.Time { - return ptr.TimeMap(vs) -} - -// Duration returns a pointer value for the time.Duration value passed in. -func Duration(v time.Duration) *time.Duration { - return ptr.Duration(v) -} - -// DurationSlice returns a slice of time.Duration pointers from the values -// passed in. -func DurationSlice(vs []time.Duration) []*time.Duration { - return ptr.DurationSlice(vs) -} - -// DurationMap returns a map of time.Duration pointers from the values -// passed in. -func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { - return ptr.DurationMap(vs) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go deleted file mode 100644 index 26d90719b2..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go +++ /dev/null @@ -1,310 +0,0 @@ -package http - -import ( - "crypto/tls" - "github.com/aws/aws-sdk-go-v2/aws" - "net" - "net/http" - "reflect" - "sync" - "time" -) - -// Defaults for the HTTPTransportBuilder. -var ( - // Default connection pool options - DefaultHTTPTransportMaxIdleConns = 100 - DefaultHTTPTransportMaxIdleConnsPerHost = 10 - - // Default connection timeouts - DefaultHTTPTransportIdleConnTimeout = 90 * time.Second - DefaultHTTPTransportTLSHandleshakeTimeout = 10 * time.Second - DefaultHTTPTransportExpectContinueTimeout = 1 * time.Second - - // Default to TLS 1.2 for all HTTPS requests. - DefaultHTTPTransportTLSMinVersion uint16 = tls.VersionTLS12 -) - -// Timeouts for net.Dialer's network connection. -var ( - DefaultDialConnectTimeout = 30 * time.Second - DefaultDialKeepAliveTimeout = 30 * time.Second -) - -// BuildableClient provides a HTTPClient implementation with options to -// create copies of the HTTPClient when additional configuration is provided. -// -// The client's methods will not share the http.Transport value between copies -// of the BuildableClient. Only exported member values of the Transport and -// optional Dialer will be copied between copies of BuildableClient. -type BuildableClient struct { - transport *http.Transport - dialer *net.Dialer - - initOnce sync.Once - - clientTimeout time.Duration - client *http.Client -} - -// NewBuildableClient returns an initialized client for invoking HTTP -// requests. -func NewBuildableClient() *BuildableClient { - return &BuildableClient{} -} - -// Do implements the HTTPClient interface's Do method to invoke a HTTP request, -// and receive the response. Uses the BuildableClient's current -// configuration to invoke the http.Request. -// -// If connection pooling is enabled (aka HTTP KeepAlive) the client will only -// share pooled connections with its own instance. Copies of the -// BuildableClient will have their own connection pools. 
-// -// Redirect (3xx) responses will not be followed, the HTTP response received -// will returned instead. -func (b *BuildableClient) Do(req *http.Request) (*http.Response, error) { - b.initOnce.Do(b.build) - - return b.client.Do(req) -} - -// Freeze returns a frozen aws.HTTPClient implementation that is no longer a BuildableClient. -// Use this to prevent the SDK from applying DefaultMode configuration values to a buildable client. -func (b *BuildableClient) Freeze() aws.HTTPClient { - cpy := b.clone() - cpy.build() - return cpy.client -} - -func (b *BuildableClient) build() { - b.client = wrapWithLimitedRedirect(&http.Client{ - Timeout: b.clientTimeout, - Transport: b.GetTransport(), - }) -} - -func (b *BuildableClient) clone() *BuildableClient { - cpy := NewBuildableClient() - cpy.transport = b.GetTransport() - cpy.dialer = b.GetDialer() - cpy.clientTimeout = b.clientTimeout - - return cpy -} - -// WithTransportOptions copies the BuildableClient and returns it with the -// http.Transport options applied. -// -// If a non (*http.Transport) was set as the round tripper, the round tripper -// will be replaced with a default Transport value before invoking the option -// functions. -func (b *BuildableClient) WithTransportOptions(opts ...func(*http.Transport)) *BuildableClient { - cpy := b.clone() - - tr := cpy.GetTransport() - for _, opt := range opts { - opt(tr) - } - cpy.transport = tr - - return cpy -} - -// WithDialerOptions copies the BuildableClient and returns it with the -// net.Dialer options applied. Will set the client's http.Transport DialContext -// member. -func (b *BuildableClient) WithDialerOptions(opts ...func(*net.Dialer)) *BuildableClient { - cpy := b.clone() - - dialer := cpy.GetDialer() - for _, opt := range opts { - opt(dialer) - } - cpy.dialer = dialer - - tr := cpy.GetTransport() - tr.DialContext = cpy.dialer.DialContext - cpy.transport = tr - - return cpy -} - -// WithTimeout Sets the timeout used by the client for all requests. -func (b *BuildableClient) WithTimeout(timeout time.Duration) *BuildableClient { - cpy := b.clone() - cpy.clientTimeout = timeout - return cpy -} - -// GetTransport returns a copy of the client's HTTP Transport. -func (b *BuildableClient) GetTransport() *http.Transport { - var tr *http.Transport - if b.transport != nil { - tr = b.transport.Clone() - } else { - tr = defaultHTTPTransport() - } - - return tr -} - -// GetDialer returns a copy of the client's network dialer. -func (b *BuildableClient) GetDialer() *net.Dialer { - var dialer *net.Dialer - if b.dialer != nil { - dialer = shallowCopyStruct(b.dialer).(*net.Dialer) - } else { - dialer = defaultDialer() - } - - return dialer -} - -// GetTimeout returns a copy of the client's timeout to cancel requests with. 
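Since every With* method returns a copy, configuration chains naturally. A hedged sketch of tuning the builder; the timeout and pool values are illustrative, and awshttp aliases this aws/transport/http package:

    // Each With* call clones the builder, so the base client is untouched.
    client := awshttp.NewBuildableClient().
        WithTimeout(30 * time.Second).
        WithDialerOptions(func(d *net.Dialer) {
            d.KeepAlive = 10 * time.Second
        }).
        WithTransportOptions(func(tr *http.Transport) {
            tr.MaxIdleConnsPerHost = 20
        })
    _ = client // hand to the SDK, e.g. via config.WithHTTPClient(client)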
-func (b *BuildableClient) GetTimeout() time.Duration { - return b.clientTimeout -} - -func defaultDialer() *net.Dialer { - return &net.Dialer{ - Timeout: DefaultDialConnectTimeout, - KeepAlive: DefaultDialKeepAliveTimeout, - DualStack: true, - } -} - -func defaultHTTPTransport() *http.Transport { - dialer := defaultDialer() - - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: dialer.DialContext, - TLSHandshakeTimeout: DefaultHTTPTransportTLSHandleshakeTimeout, - MaxIdleConns: DefaultHTTPTransportMaxIdleConns, - MaxIdleConnsPerHost: DefaultHTTPTransportMaxIdleConnsPerHost, - IdleConnTimeout: DefaultHTTPTransportIdleConnTimeout, - ExpectContinueTimeout: DefaultHTTPTransportExpectContinueTimeout, - ForceAttemptHTTP2: true, - TLSClientConfig: &tls.Config{ - MinVersion: DefaultHTTPTransportTLSMinVersion, - }, - } - - return tr -} - -// shallowCopyStruct creates a shallow copy of the passed in source struct, and -// returns that copy of the same struct type. -func shallowCopyStruct(src interface{}) interface{} { - srcVal := reflect.ValueOf(src) - srcValType := srcVal.Type() - - var returnAsPtr bool - if srcValType.Kind() == reflect.Ptr { - srcVal = srcVal.Elem() - srcValType = srcValType.Elem() - returnAsPtr = true - } - dstVal := reflect.New(srcValType).Elem() - - for i := 0; i < srcValType.NumField(); i++ { - ft := srcValType.Field(i) - if len(ft.PkgPath) != 0 { - // unexported fields have a PkgPath - continue - } - - dstVal.Field(i).Set(srcVal.Field(i)) - } - - if returnAsPtr { - dstVal = dstVal.Addr() - } - - return dstVal.Interface() -} - -// wrapWithLimitedRedirect updates the Client's Transport and CheckRedirect to -// not follow any redirect other than 307 and 308. No other redirect will be -// followed. -// -// If the client does not have a Transport defined will use a new SDK default -// http.Transport configuration. -func wrapWithLimitedRedirect(c *http.Client) *http.Client { - tr := c.Transport - if tr == nil { - tr = defaultHTTPTransport() - } - - cc := *c - cc.CheckRedirect = limitedRedirect - cc.Transport = suppressBadHTTPRedirectTransport{ - tr: tr, - } - - return &cc -} - -// limitedRedirect is a CheckRedirect that prevents the client from following -// any non 307/308 HTTP status code redirects. -// -// The 307 and 308 redirects are allowed because the client must use the -// original HTTP method for the redirected to location. Whereas 301 and 302 -// allow the client to switch to GET for the redirect. -// -// Suppresses all redirect requests with a URL of badHTTPRedirectLocation. -func limitedRedirect(r *http.Request, via []*http.Request) error { - // Request.Response, in CheckRedirect is the response that is triggering - // the redirect. - resp := r.Response - if r.URL.String() == badHTTPRedirectLocation { - resp.Header.Del(badHTTPRedirectLocation) - return http.ErrUseLastResponse - } - - switch resp.StatusCode { - case 307, 308: - // Only allow 307 and 308 redirects as they preserve the method. - return nil - } - - return http.ErrUseLastResponse -} - -// suppressBadHTTPRedirectTransport provides an http.RoundTripper -// implementation that wraps another http.RoundTripper to prevent HTTP client -// receiving 301 and 302 HTTP responses redirects without the required location -// header. -// -// Clients using this utility must have a CheckRedirect, e.g. limitedRedirect, -// that check for responses with having a URL of baseHTTPRedirectLocation, and -// suppress the redirect. 
-type suppressBadHTTPRedirectTransport struct { - tr http.RoundTripper -} - -const badHTTPRedirectLocation = `https://amazonaws.com/badhttpredirectlocation` - -// RoundTrip backfills a stub location when a 301/302 response is received -// without a location. This stub location is used by limitedRedirect to prevent -// the HTTP client from failing attempting to use follow a redirect without a -// location value. -func (t suppressBadHTTPRedirectTransport) RoundTrip(r *http.Request) (*http.Response, error) { - resp, err := t.tr.RoundTrip(r) - if err != nil { - return resp, err - } - - // S3 is the only known service to return 301 without location header. - // The Go standard library HTTP client will return an opaque error if it - // tries to follow a 301/302 response missing the location header. - switch resp.StatusCode { - case 301, 302: - if v := resp.Header.Get("Location"); len(v) == 0 { - resp.Header.Set("Location", badHTTPRedirectLocation) - } - } - - return resp, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go deleted file mode 100644 index 556f54a7f7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go +++ /dev/null @@ -1,42 +0,0 @@ -package http - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// removeContentTypeHeader is a build middleware that removes -// content type header if content-length header is unset or -// is set to zero, -type removeContentTypeHeader struct { -} - -// ID the name of the middleware. -func (m *removeContentTypeHeader) ID() string { - return "RemoveContentTypeHeader" -} - -// HandleBuild adds or appends the constructed user agent to the request. -func (m *removeContentTypeHeader) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in) - } - - // remove contentTypeHeader when content-length is zero - if req.ContentLength == 0 { - req.Header.Del("content-type") - } - - return next.HandleBuild(ctx, in) -} - -// RemoveContentTypeHeader removes content-type header if -// content length is unset or equal to zero. -func RemoveContentTypeHeader(stack *middleware.Stack) error { - return stack.Build.Add(&removeContentTypeHeader{}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go deleted file mode 100644 index 44651c9902..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go +++ /dev/null @@ -1,33 +0,0 @@ -package http - -import ( - "errors" - "fmt" - - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ResponseError provides the HTTP centric error type wrapping the underlying error -// with the HTTP response value and the deserialized RequestID. 
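Callers typically reach this wrapper through errors.As, as sketched below; err is an assumed operation error and awshttp again aliases this package:

    // Pull the HTTP status and request ID out of an operation error.
    var re *awshttp.ResponseError
    if errors.As(err, &re) {
        log.Printf("status=%d requestID=%s", re.Response.StatusCode, re.ServiceRequestID())
    }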
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
deleted file mode 100644
index 44651c9902..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package http
-
-import (
-	"errors"
-	"fmt"
-
-	smithyhttp "github.com/aws/smithy-go/transport/http"
-)
-
-// ResponseError provides the HTTP centric error type wrapping the underlying error
-// with the HTTP response value and the deserialized RequestID.
-type ResponseError struct {
-	*smithyhttp.ResponseError
-
-	// RequestID associated with response error
-	RequestID string
-}
-
-// ServiceRequestID returns the request id associated with the Response Error
func (e *ResponseError) ServiceRequestID() string { return e.RequestID }
-
-// Error returns the formatted error
-func (e *ResponseError) Error() string {
-	return fmt.Sprintf(
-		"https response error StatusCode: %d, RequestID: %s, %v",
-		e.Response.StatusCode, e.RequestID, e.Err)
-}
-
-// As populates target and returns true if the type of target is an error type
-// that the ResponseError embeds (e.g. the AWS HTTP ResponseError).
-func (e *ResponseError) As(target interface{}) bool {
-	return errors.As(e.ResponseError, target)
-}
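Callers typically recover this wrapper with errors.As. A short usage sketch, assuming the canonical (non-vendored) import path and a placeholder error value:

```go
package main

import (
	"errors"
	"fmt"

	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
)

// describeFailure shows the intended consumption pattern: errors.As walks the
// wrapped chain and recovers the *ResponseError defined above.
func describeFailure(err error) string {
	var re *awshttp.ResponseError
	if errors.As(err, &re) {
		return fmt.Sprintf("status=%d requestID=%s", re.HTTPStatusCode(), re.ServiceRequestID())
	}
	return err.Error()
}

func main() {
	// With a real SDK operation error this would print the status and request ID.
	fmt.Println(describeFailure(errors.New("plain error, not an AWS response error")))
}
```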
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
deleted file mode 100644
index a1ad20fe34..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package http
-
-import (
-	"context"
-
-	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
-	"github.com/aws/smithy-go/middleware"
-	smithyhttp "github.com/aws/smithy-go/transport/http"
-)
-
-// AddResponseErrorMiddleware adds the response error wrapper middleware
-func AddResponseErrorMiddleware(stack *middleware.Stack) error {
-	// add error wrapper middleware before the request id retriever middleware so that it can wrap the error response
-	// returned by operation deserializers
-	return stack.Deserialize.Insert(&ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
-}
-
-// ResponseErrorWrapper wraps operation errors with ResponseError.
-type ResponseErrorWrapper struct {
-}
-
-// ID returns the middleware identifier
-func (m *ResponseErrorWrapper) ID() string {
-	return "ResponseErrorWrapper"
-}
-
-// HandleDeserialize wraps the stack error with smithyhttp.ResponseError.
-func (m *ResponseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err == nil {
-		// Nothing to do when there is no error.
-		return out, metadata, err
-	}
-
-	resp, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		// No raw response to wrap with.
-		return out, metadata, err
-	}
-
-	// look for request id in metadata
-	reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata)
-
-	// Wrap the returned smithy error with the request id retrieved from the metadata
-	err = &ResponseError{
-		ResponseError: &smithyhttp.ResponseError{
-			Response: resp,
-			Err:      err,
-		},
-		RequestID: reqID,
-	}
-
-	return out, metadata, err
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
deleted file mode 100644
index 993929bd9b..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package http
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"time"
-
-	"github.com/aws/smithy-go"
-	"github.com/aws/smithy-go/middleware"
-	smithyhttp "github.com/aws/smithy-go/transport/http"
-)
-
-type readResult struct {
-	n   int
-	err error
-}
-
-// ResponseTimeoutError is an error when the reads from the response are
-// delayed longer than the timeout the read was configured for.
-type ResponseTimeoutError struct {
-	TimeoutDur time.Duration
-}
-
-// Timeout returns true, indicating the error was caused by a timeout and can
-// be retried.
-func (*ResponseTimeoutError) Timeout() bool { return true }
-
-func (e *ResponseTimeoutError) Error() string {
-	return fmt.Sprintf("read on body reach timeout limit, %v", e.TimeoutDur)
-}
-
-// timeoutReadCloser will handle body reads that take too long.
-// A ResponseTimeoutError is returned if a timeout occurs.
-type timeoutReadCloser struct {
-	reader   io.ReadCloser
-	duration time.Duration
-}
-
-// Read will spin off a goroutine to call the reader's Read method. We will
-// select on the timer's channel or the read's channel. Whichever completes
-// first will be returned.
-func (r *timeoutReadCloser) Read(b []byte) (int, error) {
-	timer := time.NewTimer(r.duration)
-	c := make(chan readResult, 1)
-
-	go func() {
-		n, err := r.reader.Read(b)
-		timer.Stop()
-		c <- readResult{n: n, err: err}
-	}()
-
-	select {
-	case data := <-c:
-		return data.n, data.err
-	case <-timer.C:
-		return 0, &ResponseTimeoutError{TimeoutDur: r.duration}
-	}
-}
-
-func (r *timeoutReadCloser) Close() error {
-	return r.reader.Close()
-}
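The Read implementation above races the real read against a timer. A freestanding sketch of the same pattern (names are illustrative); the buffered channel keeps the reader goroutine from blocking forever if the timer wins:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"time"
)

var errReadTimeout = errors.New("read timed out")

type timeoutReader struct {
	r io.Reader
	d time.Duration
}

func (t timeoutReader) Read(p []byte) (int, error) {
	type result struct {
		n   int
		err error
	}
	ch := make(chan result, 1) // buffered so the goroutine never leaks blocked
	timer := time.NewTimer(t.d)
	defer timer.Stop()

	go func() {
		n, err := t.r.Read(p)
		ch <- result{n, err}
	}()

	select {
	case res := <-ch:
		return res.n, res.err
	case <-timer.C:
		// As in the original, the underlying read may still complete later
		// and write into p; callers must not reuse p after a timeout.
		return 0, errReadTimeout
	}
}

func main() {
	r := timeoutReader{r: strings.NewReader("hello"), d: time.Second}
	b, err := io.ReadAll(r)
	fmt.Println(string(b), err) // "hello <nil>": the read beats the timer
}
```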
-
-// AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the
-// response body so that a read that takes too long will return an error.
-func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error {
-	return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After)
-}
-
-// readTimeout wraps the response body with a timeoutReadCloser
-type readTimeout struct {
-	duration time.Duration
-}
-
-// ID returns the id of the middleware
-func (*readTimeout) ID() string {
-	return "ReadResponseTimeout"
-}
-
-// HandleDeserialize implements the DeserializeMiddleware interface
-func (m *readTimeout) HandleDeserialize(
-	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
-) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	response.Body = &timeoutReadCloser{
-		reader:   response.Body,
-		duration: m.duration,
-	}
-	out.RawResponse = response
-
-	return out, metadata, err
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
deleted file mode 100644
index cc3ae81140..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package aws
-
-import (
-	"fmt"
-)
-
-// Ternary is an enum allowing an unknown or none state in addition to a bool's
-// true and false.
-type Ternary int
-
-func (t Ternary) String() string {
-	switch t {
-	case UnknownTernary:
-		return "unknown"
-	case FalseTernary:
-		return "false"
-	case TrueTernary:
-		return "true"
-	default:
-		return fmt.Sprintf("unknown value, %d", int(t))
-	}
-}
-
-// Bool returns true if the value is TrueTernary, false otherwise.
-func (t Ternary) Bool() bool {
-	return t == TrueTernary
-}
-
-// Enumerations for the values of the Ternary type.
-const (
-	UnknownTernary Ternary = iota
-	FalseTernary
-	TrueTernary
-)
-
-// BoolTernary returns a true or false Ternary value for the bool provided.
-func BoolTernary(v bool) Ternary {
-	if v {
-		return TrueTernary
-	}
-	return FalseTernary
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go
deleted file mode 100644
index 5f729d45e1..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Package aws provides core functionality for making requests to AWS services.
-package aws
-
-// SDKName is the name of this AWS SDK
-const SDKName = "aws-sdk-go-v2"
-
-// SDKVersion is the version of this SDK
-const SDKVersion = goModuleVersion
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
deleted file mode 100644
index d5e6071fa2..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
+++ /dev/null
@@ -1,605 +0,0 @@
-# v1.27.9 (2024-03-21)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.27.8 (2024-03-18)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.27.7 (2024-03-07)
-
-* **Bug Fix**: Remove dependency on go-cmp.
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.6 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.5 (2024-03-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.4 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.3 (2024-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.1 (2024-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.6 (2024-01-22) - -* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.5 (2024-01-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.4 (2024-01-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.3 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.2 (2023-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.1 (2023-12-08) - -* **Bug Fix**: Correct loading of [services *] sections into shared config. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.0 (2023-12-07) - -* **Feature**: Support modeled request compression. The only algorithm supported at this time is `gzip`. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.12 (2023-12-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.11 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.10 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.9 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.8 (2023-11-28.3) - -* **Bug Fix**: Correct resolution of S3Express auth disable toggle. - -# v1.25.7 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.6 (2023-11-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.5 (2023-11-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.3 (2023-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.2 (2023-11-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.1 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.0 (2023-11-14) - -* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.0 (2023-11-13) - -* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2023-11-09.2) - -* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.3 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.2 (2023-11-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.1 (2023-11-06) - -* No change notes available for this release. - -# v1.22.0 (2023-11-02) - -* **Feature**: Add env and shared config settings for disabling IMDSv1 fallback. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.1 (2023-10-24) - -* No change notes available for this release. - -# v1.19.0 (2023-10-16) - -* **Feature**: Modify logic of retrieving user agent appID from env config - -# v1.18.45 (2023-10-12) - -* **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.44 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.43 (2023-10-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.42 (2023-09-22) - -* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. -* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.41 (2023-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.40 (2023-09-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.39 (2023-09-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.38 (2023-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.37 (2023-08-23) - -* No change notes available for this release. 
- -# v1.18.36 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.35 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.34 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.33 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.32 (2023-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.31 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.30 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.29 (2023-07-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.28 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.27 (2023-06-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.26 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.25 (2023-05-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.24 (2023-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.23 (2023-05-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.22 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.21 (2023-04-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.20 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.19 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.18 (2023-03-16) - -* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015. 
-
-# v1.18.17 (2023-03-14)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.16 (2023-03-10)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.15 (2023-02-22)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.14 (2023-02-20)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.13 (2023-02-15)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.12 (2023-02-03)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.11 (2023-02-01)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.10 (2023-01-25)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.9 (2023-01-23)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.8 (2023-01-05)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.7 (2022-12-20)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.6 (2022-12-19)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.5 (2022-12-15)
-
-* **Bug Fix**: Unify logic between shared config and credentials files in finding the home directory
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.4 (2022-12-02)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.3 (2022-11-22)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.2 (2022-11-17)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.1 (2022-11-16)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.18.0 (2022-11-11)
-
-* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
-* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.17.11 (2022-11-10)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.17.10 (2022-10-24)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.17.9 (2022-10-21)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.17.8 (2022-09-30)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.17.7 (2022-09-20)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.17.6 (2022-09-14)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.17.5 (2022-09-02)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.17.4 (2022-08-31)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.17.3 (2022-08-30)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.17.2 (2022-08-29)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.17.1 (2022-08-15)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.17.0 (2022-08-14)
-
-* **Feature**: Add alternative mechanism for determining the user's `$HOME` or `%USERPROFILE%` location when the environment variables are not present.
-
-# v1.16.1 (2022-08-11)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.16.0 (2022-08-10)
-
-* **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`.
-
-# v1.15.17 (2022-08-09)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.16 (2022-08-08)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.15 (2022-08-01)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.14 (2022-07-11)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.13 (2022-07-05)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.12 (2022-06-29)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.11 (2022-06-16)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.10 (2022-06-07)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.9 (2022-05-26)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.8 (2022-05-25)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.7 (2022-05-17)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.6 (2022-05-16)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.5 (2022-05-09)
-
-* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682)
-
-# v1.15.4 (2022-04-25)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.3 (2022-03-30)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.2 (2022-03-24)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.1 (2022-03-23)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.15.0 (2022-03-08)
-
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.14.0 (2022-02-24)
-
-* **Feature**: Adds support for loading RetryMaxAttempts and RetryMode from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if a custom retryer has not been specified. See the [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about these options and how to use them.
-* **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage is the same as the environment variable, `AWS_CA_BUNDLE`, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589)
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.13.1 (2022-01-28)
-
-* **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored.
[#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR.
-* **Bug Fix**: Fixes the SDK's handling of `duration_seconds` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for helping reproduce this bug.
-* **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of hard coded environment variable for OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563)
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.13.0 (2022-01-14)
-
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.12.0 (2022-01-07)
-
-* **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache.
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.11.1 (2021-12-21)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.11.0 (2021-12-02)
-
-* **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`.
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.10.3 (2021-11-30)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.10.2 (2021-11-19)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.10.1 (2021-11-12)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.10.0 (2021-11-06)
-
-* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.9.0 (2021-10-21)
-
-* **Feature**: Updated to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.8.3 (2021-10-11)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.8.2 (2021-09-17)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.8.1 (2021-09-10)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.8.0 (2021-09-02)
-
-* **Feature**: Add support for S3 Multi-Region Access Point ARNs.
-
-# v1.7.0 (2021-08-27)
-
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.6.1 (2021-08-19)
-
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.6.0 (2021-08-04)
-
-* **Feature**: Adds error handling for deferred close calls
-* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
-* **Dependency Update**: Updated to the latest SDK module versions
-
-# v1.5.0 (2021-07-15)
-
-* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
-* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-07-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-06-25) - -* **Feature**: Adds configuration setting for enabling endpoint discovery. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-05-20) - -* **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile. -* **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
deleted file mode 100644
index 50582d89d5..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package config
-
-import (
-	"context"
-	"os"
-
-	"github.com/aws/aws-sdk-go-v2/aws"
-)
-
-// defaultAWSConfigResolvers are a slice of functions that will resolve external
-// configuration values into AWS configuration values.
-//
-// This will set up the AWS configuration's Region, credentials, and other
-// values.
-var defaultAWSConfigResolvers = []awsConfigResolver{
-	// Resolves the default configuration the SDK's aws.Config will be
-	// initialized with.
-	resolveDefaultAWSConfig,
-
-	// Sets the logger to be used. This could be a user-provided logger, as
-	// well as the client logging mode.
-	resolveLogger,
-	resolveClientLogMode,
-
-	// Sets the HTTP client and configuration to use for making requests using
-	// the HTTP transport.
-	resolveHTTPClient,
-	resolveCustomCABundle,
-
-	// Sets the endpoint resolving behavior the API Clients will use for making
-	// requests. Clients default to their own endpoint resolvers; this allows
-	// overrides to be specified. The resolveEndpointResolver option is
-	// deprecated, but we still need to set it for backwards compatibility on
-	// config construction.
-	resolveEndpointResolver,
-	resolveEndpointResolverWithOptions,
-
-	// Sets the retry behavior API clients will use within their retry attempt
-	// middleware. Defaults to unset, allowing API clients to define their own
-	// retry behavior.
-	resolveRetryer,
-
-	// Sets the region the API Clients should use for making requests.
-	resolveRegion,
-	resolveEC2IMDSRegion,
-	resolveDefaultRegion,
-
-	// Sets the additional set of middleware stack mutators that will customize
-	// the API client request pipeline middleware.
-	resolveAPIOptions,
-
-	// Resolves the DefaultsMode that should be used by SDK clients. If this
-	// mode is set to DefaultsModeAuto, it is resolved from the runtime
-	// environment.
-	//
-	// Comes after HTTPClient and CustomCABundle to ensure the HTTP client is
-	// configured if provided before invoking IMDS if mode is auto. Comes
-	// before resolving credentials so that those subsequent clients use the
-	// configured auto mode.
-	resolveDefaultsModeOptions,
-
-	// Sets the resolved credentials the API clients will use for
-	// authentication. Provides the SDK's default credential chain.
-	//
-	// Should probably be the last step in the resolve chain to ensure that all
-	// other configurations are resolved first in case downstream credentials
-	// implementations depend on or can be configured with earlier resolved
-	// configuration options.
-	resolveCredentials,
-
-	// Sets the resolved bearer authentication token API clients will use for
-	// the httpBearerAuth authentication scheme.
-	resolveBearerAuthToken,
-
-	// Sets the SDK app ID if present in env var or shared config profile
-	resolveAppID,
-
-	resolveBaseEndpoint,
-
-	// Sets the DisableRequestCompression if present in env var or shared config profile
-	resolveDisableRequestCompression,
-
-	// Sets the RequestMinCompressSizeBytes if present in env var or shared config profile
-	resolveRequestMinCompressSizeBytes,
-}
-
-// A Config represents a generic configuration value or set of values. This type
-// will be used by the AWSConfigResolvers to extract configuration values.
-//
-// Generally the Config type will use type assertions against the Provider
-// interfaces to extract specific data from the Config.
-type Config interface{}
-
-// A loader is used to load external configuration data and returns it as
-// a generic Config type.
-//
-// The loader should return an error if it fails to load the external
-// configuration, the configuration data is malformed, or required components
-// are missing.
-type loader func(context.Context, configs) (Config, error)
-
-// An awsConfigResolver will extract configuration data from the configs slice
-// using the provider interfaces to extract specific functionality. The extracted
-// configuration values will be written to the AWS Config value.
-//
-// The resolver should return an error if it fails to extract the data, the
-// data is malformed, or incomplete.
-type awsConfigResolver func(ctx context.Context, cfg *aws.Config, configs configs) error
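The resolver chain above is a plain function-slice pattern: each resolver inspects external sources and writes into the config, and the first error stops the chain. A simplified, self-contained sketch (types are stand-ins, not the SDK's):

```go
package main

import (
	"fmt"
	"os"
)

type config struct {
	region string
}

type resolver func(cfg *config) error

func resolveRegion(cfg *config) error {
	if v := os.Getenv("AWS_REGION"); v != "" {
		cfg.region = v
	}
	return nil
}

func resolveDefaultRegion(cfg *config) error {
	if cfg.region == "" {
		cfg.region = "us-east-1" // illustrative fallback only
	}
	return nil
}

// resolveAll mirrors ResolveAWSConfig: run resolvers in order, fail fast.
func resolveAll(resolvers []resolver) (config, error) {
	var cfg config
	for _, fn := range resolvers {
		if err := fn(&cfg); err != nil {
			return config{}, err
		}
	}
	return cfg, nil
}

func main() {
	cfg, err := resolveAll([]resolver{resolveRegion, resolveDefaultRegion})
	fmt.Println(cfg.region, err)
}
```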
-// configs is a slice of Config values. These values will be used by the
-// AWSConfigResolvers to extract external configuration values to populate the
-// AWS Config type.
-//
-// Use AppendFromLoaders to add additional external Config values that are
-// loaded from external sources.
-//
-// Use ResolveAWSConfig after external Config values have been added or loaded
-// to extract the loaded configuration values into the AWS Config.
-type configs []Config
-
-// AppendFromLoaders iterates over the slice of loaders passed in, calling each
-// loader function in order. The external config value returned by the loader
-// will be added to the returned configs slice.
-//
-// If a loader returns an error this method will stop iterating and return
-// that error.
-func (cs configs) AppendFromLoaders(ctx context.Context, loaders []loader) (configs, error) {
-	for _, fn := range loaders {
-		cfg, err := fn(ctx, cs)
-		if err != nil {
-			return nil, err
-		}
-
-		cs = append(cs, cfg)
-	}
-
-	return cs, nil
-}
-
-// ResolveAWSConfig returns an AWS configuration populated with values by calling
-// the resolvers slice passed in. Each resolver is called in order. Any resolver
-// may overwrite the AWS Configuration value of a previous resolver.
-//
-// If a resolver returns an error this method will return that error, and stop
-// iterating over the resolvers.
-func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigResolver) (aws.Config, error) {
-	var cfg aws.Config
-
-	for _, fn := range resolvers {
-		if err := fn(ctx, &cfg, cs); err != nil {
-			return aws.Config{}, err
-		}
-	}
-
-	return cfg, nil
-}
-
-// ResolveConfig calls the provided function, passing the slice of configuration
-// sources. This implements the aws.ConfigResolver interface.
-func (cs configs) ResolveConfig(f func(configs []interface{}) error) error {
-	var cfgs []interface{}
-	for i := range cs {
-		cfgs = append(cfgs, cs[i])
-	}
-	return f(cfgs)
-}
-
-// LoadDefaultConfig reads the SDK's default external configurations, and
-// populates an AWS Config with the values from the external configurations.
-//
-// An optional variadic set of additional Config values can be provided as input
-// that will be prepended to the configs slice. Use this to add custom configuration.
-// The custom configurations must satisfy the respective providers for their data
-// or the custom data will be ignored by the resolvers and config loaders.
-//
-//	cfg, err := config.LoadDefaultConfig( context.TODO(),
-//	   config.WithSharedConfigProfile("test-profile"),
-//	)
-//	if err != nil {
-//	   panic(fmt.Sprintf("failed loading config, %v", err))
-//	}
-//
-// The default configuration sources are:
-// * Environment Variables
-// * Shared Configuration and Shared Credentials files.
-func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) (cfg aws.Config, err error) {
-	var options LoadOptions
-	for _, optFn := range optFns {
-		if err := optFn(&options); err != nil {
-			return aws.Config{}, err
-		}
-	}
-
-	// assign Load Options to configs
-	var cfgCpy = configs{options}
-
-	cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, resolveConfigLoaders(&options))
-	if err != nil {
-		return aws.Config{}, err
-	}
-
-	cfg, err = cfgCpy.ResolveAWSConfig(ctx, defaultAWSConfigResolvers)
-	if err != nil {
-		return aws.Config{}, err
-	}
-
-	return cfg, nil
-}
-
-func resolveConfigLoaders(options *LoadOptions) []loader {
-	loaders := make([]loader, 2)
-	loaders[0] = loadEnvConfig
-
-	// specification of a profile should cause a load failure if it doesn't exist
-	if os.Getenv(awsProfileEnvVar) != "" || options.SharedConfigProfile != "" {
-		loaders[1] = loadSharedConfig
-	} else {
-		loaders[1] = loadSharedConfigIgnoreNotExist
-	}
-
-	return loaders
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go
deleted file mode 100644
index 20b66367ff..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package config
-
-import (
-	"context"
-	"os"
-
-	"github.com/aws/aws-sdk-go-v2/aws"
-	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
-)
-
-const execEnvVar = "AWS_EXECUTION_ENV"
-
-// DefaultsModeOptions is the set of options that are used to configure the
-// resolution of the SDK's defaults mode.
-type DefaultsModeOptions struct {
-	// The SDK configuration defaults mode. Defaults to legacy if not specified.
-	//
-	// Supported modes are: auto, cross-region, in-region, legacy, mobile, standard
-	Mode aws.DefaultsMode
-
-	// The EC2 Instance Metadata Client that should be used when performing environment
-	// discovery when aws.DefaultsModeAuto is set.
-	//
-	// If not specified the SDK will construct a client if the instance metadata service has not been disabled by
-	// the AWS_EC2_METADATA_DISABLED environment variable.
-	IMDSClient *imds.Client
-}
-
-func resolveDefaultsModeRuntimeEnvironment(ctx context.Context, envConfig *EnvConfig, client *imds.Client) (aws.RuntimeEnvironment, error) {
-	getRegionOutput, err := client.GetRegion(ctx, &imds.GetRegionInput{})
-	// honor context timeouts, but if we couldn't talk to IMDS don't fail runtime environment introspection.
-	select {
-	case <-ctx.Done():
-		return aws.RuntimeEnvironment{}, err
-	default:
-	}
-
-	var imdsRegion string
-	if err == nil {
-		imdsRegion = getRegionOutput.Region
-	}
-
-	return aws.RuntimeEnvironment{
-		EnvironmentIdentifier:     aws.ExecutionEnvironmentID(os.Getenv(execEnvVar)),
-		Region:                    envConfig.Region,
-		EC2InstanceMetadataRegion: imdsRegion,
-	}, nil
-}
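For completeness, a hedged usage sketch of the LoadDefaultConfig entry point whose implementation is removed above (the profile name and region are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithSharedConfigProfile("test-profile"), // assumes this profile exists
		config.WithRegion("us-west-2"),
	)
	if err != nil {
		log.Fatalf("failed loading config, %v", err)
	}
	fmt.Println("resolved region:", cfg.Region)
}
```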
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
deleted file mode 100644
index aab7164e28..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Package config provides utilities for loading configuration from multiple
-// sources that can be used to configure the SDK's API clients and utilities.
-//
-// The config package will load configuration from environment variables, the AWS
-// shared configuration file (~/.aws/config), and the AWS shared credentials file
-// (~/.aws/credentials).
-//
-// Use LoadDefaultConfig to load configuration from all the SDK's supported
-// sources, and resolve credentials using the SDK's default credential chain.
-//
-// LoadDefaultConfig allows for a variadic list of additional Config sources that can
-// provide one or more configuration values which can be used to programmatically control the resolution
-// of a specific value, or allow for a broader range of additional configuration sources not supported by the SDK.
-// A Config source implements one or more provider interfaces defined in this package. Config sources passed in will
-// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources
-// implement the same provider interface, priority will be handled by the order in which the sources were passed in.
-//
-// A number of helpers (prefixed by "With") are provided in this package that implement their respective provider
-// interface. These helpers should be used for overriding configuration programmatically at runtime.
-package config
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
deleted file mode 100644
index 88550198cc..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
+++ /dev/null
@@ -1,819 +0,0 @@
-package config
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"strconv"
-	"strings"
-
-	"github.com/aws/aws-sdk-go-v2/aws"
-	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
-	smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression"
-)
-
-// CredentialsSourceName provides a name of the provider when config is
-// loaded from environment.
-const CredentialsSourceName = "EnvConfigCredentials"
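The env_config.go file removed here reads most values through fallback key lists (a primary environment variable, then a legacy alias). A minimal sketch of that lookup order, with standard library calls only:

```go
package main

import (
	"fmt"
	"os"
)

// firstEnv returns the value of the first non-empty environment variable in
// keys, mirroring the SDK's setStringFromEnvVal-style fallback behavior.
func firstEnv(keys []string) string {
	for _, k := range keys {
		if v := os.Getenv(k); v != "" {
			return v
		}
	}
	return ""
}

func main() {
	os.Setenv("AWS_ACCESS_KEY", "legacy-akid")
	id := firstEnv([]string{"AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY"})
	fmt.Println(id) // "legacy-akid": the legacy key is used only as a fallback
}
```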
-
-// Environment variables that will be read for configuration values.
-const (
-	awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
-	awsAccessKeyEnvVar   = "AWS_ACCESS_KEY"
-
-	awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
-	awsSecretKeyEnvVar       = "AWS_SECRET_KEY"
-
-	awsSessionTokenEnvVar = "AWS_SESSION_TOKEN"
-
-	awsContainerCredentialsEndpointEnvVar     = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
-	awsContainerCredentialsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
-	awsContainerPProviderAuthorizationEnvVar  = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
-
-	awsRegionEnvVar        = "AWS_REGION"
-	awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
-
-	awsProfileEnvVar        = "AWS_PROFILE"
-	awsDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE"
-
-	awsSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE"
-
-	awsConfigFileEnvVar = "AWS_CONFIG_FILE"
-
-	awsCustomCABundleEnvVar = "AWS_CA_BUNDLE"
-
-	awsWebIdentityTokenFilePathEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE"
-
-	awsRoleARNEnvVar         = "AWS_ROLE_ARN"
-	awsRoleSessionNameEnvVar = "AWS_ROLE_SESSION_NAME"
-
-	awsEnableEndpointDiscoveryEnvVar = "AWS_ENABLE_ENDPOINT_DISCOVERY"
-
-	awsS3UseARNRegionEnvVar = "AWS_S3_USE_ARN_REGION"
-
-	awsEc2MetadataServiceEndpointModeEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"
-
-	awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
-
-	awsEc2MetadataDisabled         = "AWS_EC2_METADATA_DISABLED"
-	awsEc2MetadataV1DisabledEnvVar = "AWS_EC2_METADATA_V1_DISABLED"
-
-	awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS"
-
-	awsUseDualStackEndpoint = "AWS_USE_DUALSTACK_ENDPOINT"
-
-	awsUseFIPSEndpoint = "AWS_USE_FIPS_ENDPOINT"
-
-	awsDefaultMode = "AWS_DEFAULTS_MODE"
-
-	awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS"
-	awsRetryMode        = "AWS_RETRY_MODE"
-	awsSdkAppID         = "AWS_SDK_UA_APP_ID"
-
-	awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS"
-	awsEndpointURL               = "AWS_ENDPOINT_URL"
-
-	awsDisableRequestCompression     = "AWS_DISABLE_REQUEST_COMPRESSION"
-	awsRequestMinCompressionSizeBytes = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES"
-
-	awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH"
-)
-
-var (
-	credAccessEnvKeys = []string{
-		awsAccessKeyIDEnvVar,
-		awsAccessKeyEnvVar,
-	}
-	credSecretEnvKeys = []string{
-		awsSecretAccessKeyEnvVar,
-		awsSecretKeyEnvVar,
-	}
-	regionEnvKeys = []string{
-		awsRegionEnvVar,
-		awsDefaultRegionEnvVar,
-	}
-	profileEnvKeys = []string{
-		awsProfileEnvVar,
-		awsDefaultProfileEnvVar,
-	}
-)
-
-// EnvConfig is a collection of environment values the SDK will read its
-// configuration from. All environment values are optional, but some values,
-// such as credentials, require multiple values to be complete or the values
-// will be ignored.
-type EnvConfig struct {
-	// Environment configuration values. If set, both Access Key ID and Secret Access
-	// Key must be provided. Session Token can optionally also be provided, but is
-	// not required.
-	//
-	//	# Access Key ID
-	//	AWS_ACCESS_KEY_ID=AKID
-	//	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
-	//
-	//	# Secret Access Key
-	//	AWS_SECRET_ACCESS_KEY=SECRET
-	//	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
-	//
-	//	# Session Token
-	//	AWS_SESSION_TOKEN=TOKEN
-	Credentials aws.Credentials
-
-	// ContainerCredentialsEndpoint value is the HTTP enabled endpoint to retrieve credentials
-	// using the endpointcreds.Provider.
-	ContainerCredentialsEndpoint string
-
-	// ContainerCredentialsRelativePath is the relative URI path that will be used when attempting to retrieve
-	// credentials from the container endpoint.
-	ContainerCredentialsRelativePath string
-
-	// ContainerAuthorizationToken is the authorization token that will be included in the HTTP Authorization
-	// header when attempting to retrieve credentials from the container credentials endpoint.
-	ContainerAuthorizationToken string
-
-	// Region value will instruct the SDK where to make service API requests to. If it is
-	// not provided in the environment, the region must be provided before a service
-	// client request is made.
-	//
-	//	AWS_REGION=us-west-2
-	//	AWS_DEFAULT_REGION=us-west-2
-	Region string
-
-	// Profile name the SDK should use when loading shared configuration from the
-	// shared configuration files. If not provided "default" will be used as the
-	// profile name.
-	//
-	//	AWS_PROFILE=my_profile
-	//	AWS_DEFAULT_PROFILE=my_profile
-	SharedConfigProfile string
-
-	// Shared credentials file path can be set to instruct the SDK to use an alternate
-	// file for the shared credentials. If not set the file will be loaded from
-	// $HOME/.aws/credentials on Linux/Unix based systems, and
-	// %USERPROFILE%\.aws\credentials on Windows.
-	//
-	//	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
-	SharedCredentialsFile string
-
-	// Shared config file path can be set to instruct the SDK to use an alternate
-	// file for the shared config. If not set the file will be loaded from
-	// $HOME/.aws/config on Linux/Unix based systems, and
-	// %USERPROFILE%\.aws\config on Windows.
-	//
-	//	AWS_CONFIG_FILE=$HOME/my_shared_config
-	SharedConfigFile string
-
-	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
-	// that the SDK will use instead of the system's root CA bundle.
-	// Only use this if you want to configure the SDK to use a custom set
-	// of CAs.
-	//
-	// Enabling this option will attempt to merge the Transport
-	// into the SDK's HTTP client. If the client's Transport is
-	// not a http.Transport an error will be returned. If the
-	// Transport's TLS config is set this option will cause the
-	// SDK to overwrite the Transport's TLS config's RootCAs value.
-	//
-	// Setting a custom HTTPClient in the aws.Config options will override this setting.
-	// To use this option and a custom HTTP client, the HTTP client needs to be provided
-	// when creating the config, not the service client.
-	//
-	//	AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
-	CustomCABundle string
-
-	// Enables endpoint discovery via environment variables.
-	//
-	//	AWS_ENABLE_ENDPOINT_DISCOVERY=true
-	EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
-
-	// Specifies the WebIdentity token the SDK should use to assume a role
-	// with.
-	//
-	//	AWS_WEB_IDENTITY_TOKEN_FILE=file_path
-	WebIdentityTokenFilePath string
-
-	// Specifies the IAM role ARN to use when assuming a role.
-	//
-	//	AWS_ROLE_ARN=role_arn
-	RoleARN string
-
-	// Specifies the IAM role session name to use when assuming a role.
-	//
-	//	AWS_ROLE_SESSION_NAME=session_name
-	RoleSessionName string
-
-	// Specifies if the S3 service should allow ARNs to direct the region
-	// the client's requests are sent to.
-	//
-	//	AWS_S3_USE_ARN_REGION=true
-	S3UseARNRegion *bool
-
-	// Specifies if the EC2 IMDS service client is enabled.
-	//
-	//	AWS_EC2_METADATA_DISABLED=true
-	EC2IMDSClientEnableState imds.ClientEnableState
-
-	// Specifies if EC2 IMDSv1 fallback is disabled.
-	//
-	// AWS_EC2_METADATA_V1_DISABLED=true
-	EC2IMDSv1Disabled *bool
-
-	// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
-	//
-	// AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
-	EC2IMDSEndpointMode imds.EndpointModeState
-
-	// Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
-	//
-	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://fd00:ec2::254
-	EC2IMDSEndpoint string
-
-	// Specifies if the S3 service should disable multi-region access points
-	// support.
-	//
-	// AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS=true
-	S3DisableMultiRegionAccessPoints *bool
-
-	// Specifies that SDK clients must resolve a dual-stack endpoint for
-	// services.
-	//
-	// AWS_USE_DUALSTACK_ENDPOINT=true
-	UseDualStackEndpoint aws.DualStackEndpointState
-
-	// Specifies that SDK clients must resolve a FIPS endpoint for
-	// services.
-	//
-	// AWS_USE_FIPS_ENDPOINT=true
-	UseFIPSEndpoint aws.FIPSEndpointState
-
-	// Specifies the SDK Defaults Mode used by services.
-	//
-	// AWS_DEFAULTS_MODE=standard
-	DefaultsMode aws.DefaultsMode
-
-	// Specifies the maximum number of attempts an API client will make to call an
-	// operation that fails with a retryable error.
-	//
-	// AWS_MAX_ATTEMPTS=3
-	RetryMaxAttempts int
-
-	// Specifies the retry model the API client will be created with.
-	//
-	// AWS_RETRY_MODE=standard
-	RetryMode aws.RetryMode
-
-	// AWS SDK app ID that can be added to the user agent header string
-	AppID string
-
-	// Flag used to disable configured endpoints.
-	IgnoreConfiguredEndpoints *bool
-
-	// Value to contain configured endpoints to be propagated to
-	// corresponding endpoint resolution field.
-	BaseEndpoint string
-
-	// Determines if request compression is allowed; defaults to false.
-	// Retrieved from the env var AWS_DISABLE_REQUEST_COMPRESSION.
-	DisableRequestCompression *bool
-
-	// Inclusive threshold request body size to trigger compression;
-	// defaults to 10240 and must be within 0 and 10485760 bytes inclusive.
-	// Retrieved from the env var AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES.
-	RequestMinCompressSizeBytes *int64
-
-	// Whether S3Express auth is disabled.
-	//
-	// This will NOT prevent requests from being made to S3Express buckets, it
-	// will only bypass the modified endpoint routing and signing behaviors
-	// associated with the feature.
-	S3DisableExpressAuth *bool
-}
-
-// loadEnvConfig reads configuration values from the OS's environment variables.
-// Returns a Config-typed EnvConfig to satisfy the ConfigLoader func type.
-func loadEnvConfig(ctx context.Context, cfgs configs) (Config, error) {
-	return NewEnvConfig()
-}
-
-// NewEnvConfig retrieves the SDK's environment configuration.
-// See `EnvConfig` for the values that will be retrieved.
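// Sketch (illustrative; not part of the vendored file): how these environment
// values typically reach an application through the public entry point
// config.LoadDefaultConfig, which invokes the env loader below:
//
//	// With AWS_REGION=us-west-2 exported:
//	cfg, err := config.LoadDefaultConfig(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = cfg.Region // "us-west-2"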
-func NewEnvConfig() (EnvConfig, error) { - var cfg EnvConfig - - creds := aws.Credentials{ - Source: CredentialsSourceName, - } - setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys) - setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys) - if creds.HasKeys() { - creds.SessionToken = os.Getenv(awsSessionTokenEnvVar) - cfg.Credentials = creds - } - - cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsEndpointEnvVar) - cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativePathEnvVar) - cfg.ContainerAuthorizationToken = os.Getenv(awsContainerPProviderAuthorizationEnvVar) - - setStringFromEnvVal(&cfg.Region, regionEnvKeys) - setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys) - - cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnvVar) - cfg.SharedConfigFile = os.Getenv(awsConfigFileEnvVar) - - cfg.CustomCABundle = os.Getenv(awsCustomCABundleEnvVar) - - cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFilePathEnvVar) - - cfg.RoleARN = os.Getenv(awsRoleARNEnvVar) - cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar) - - cfg.AppID = os.Getenv(awsSdkAppID) - - if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompression}); err != nil { - return cfg, err - } - if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytes}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil { - return cfg, err - } - - if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil { - return cfg, err - } - - if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnvVar}); err != nil { - return cfg, err - } - - setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabled}) - if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnvVar}); err != nil { - return cfg, err - } - cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar) - if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnvVar}); err != nil { - return cfg, err - } - - if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil { - return cfg, err - } - - if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpoint}); err != nil { - return cfg, err - } - - if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpoint}); err != nil { - return cfg, err - } - - if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultMode}); err != nil { - return cfg, err - } - - if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsRetryMaxAttempts}); err != nil { - return cfg, err - } - if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryMode}); err != nil { - return cfg, err - } - - setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURL}) - - if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpoints}); err != nil { - return cfg, err - } - - if err := setBoolPtrFromEnvVal(&cfg.S3DisableExpressAuth, []string{awsS3DisableExpressSessionAuthEnv}); err != nil { - return cfg, err - } - - return cfg, nil -} - -func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { - if len(c.DefaultsMode) == 0 { - return 
"", false, nil - } - return c.DefaultsMode, true, nil -} - -func (c EnvConfig) getAppID(context.Context) (string, bool, error) { - return c.AppID, len(c.AppID) > 0, nil -} - -func (c EnvConfig) getDisableRequestCompression(context.Context) (bool, bool, error) { - if c.DisableRequestCompression == nil { - return false, false, nil - } - return *c.DisableRequestCompression, true, nil -} - -func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) { - if c.RequestMinCompressSizeBytes == nil { - return 0, false, nil - } - return *c.RequestMinCompressSizeBytes, true, nil -} - -// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, -// and not 0. -func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { - if c.RetryMaxAttempts == 0 { - return 0, false, nil - } - return c.RetryMaxAttempts, true, nil -} - -// GetRetryMode returns the RetryMode of AWS_RETRY_MODE if was specified, and a -// valid value. -func (c EnvConfig) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { - if len(c.RetryMode) == 0 { - return "", false, nil - } - return c.RetryMode, true, nil -} - -func setEC2IMDSClientEnableState(state *imds.ClientEnableState, keys []string) { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - switch { - case strings.EqualFold(value, "true"): - *state = imds.ClientDisabled - case strings.EqualFold(value, "false"): - *state = imds.ClientEnabled - default: - continue - } - break - } -} - -func setDefaultsModeFromEnvVal(mode *aws.DefaultsMode, keys []string) error { - for _, k := range keys { - if value := os.Getenv(k); len(value) > 0 { - if ok := mode.SetFromString(value); !ok { - return fmt.Errorf("invalid %s value: %s", k, value) - } - break - } - } - return nil -} - -func setRetryModeFromEnvVal(mode *aws.RetryMode, keys []string) (err error) { - for _, k := range keys { - if value := os.Getenv(k); len(value) > 0 { - *mode, err = aws.ParseRetryMode(value) - if err != nil { - return fmt.Errorf("invalid %s value, %w", k, err) - } - break - } - } - return nil -} - -func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - if err := mode.SetFromString(value); err != nil { - return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err) - } - } - return nil -} - -// GetRegion returns the AWS Region if set in the environment. Returns an empty -// string if not set. -func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) { - if len(c.Region) == 0 { - return "", false, nil - } - return c.Region, true, nil -} - -// GetSharedConfigProfile returns the shared config profile if set in the -// environment. Returns an empty string if not set. -func (c EnvConfig) getSharedConfigProfile(ctx context.Context) (string, bool, error) { - if len(c.SharedConfigProfile) == 0 { - return "", false, nil - } - - return c.SharedConfigProfile, true, nil -} - -// getSharedConfigFiles returns a slice of filenames set in the environment. -// -// Will return the filenames in the order of: -// * Shared Config -func (c EnvConfig) getSharedConfigFiles(context.Context) ([]string, bool, error) { - var files []string - if v := c.SharedConfigFile; len(v) > 0 { - files = append(files, v) - } - - if len(files) == 0 { - return nil, false, nil - } - return files, true, nil -} - -// getSharedCredentialsFiles returns a slice of filenames set in the environment. 
-//
-// Will return the filenames in the order of:
-// * Shared Credentials
-func (c EnvConfig) getSharedCredentialsFiles(context.Context) ([]string, bool, error) {
-	var files []string
-	if v := c.SharedCredentialsFile; len(v) > 0 {
-		files = append(files, v)
-	}
-	if len(files) == 0 {
-		return nil, false, nil
-	}
-	return files, true, nil
-}
-
-// getCustomCABundle returns the custom CA bundle's PEM bytes if the file was
-// successfully read.
-func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
-	if len(c.CustomCABundle) == 0 {
-		return nil, false, nil
-	}
-
-	b, err := ioutil.ReadFile(c.CustomCABundle)
-	if err != nil {
-		return nil, false, err
-	}
-	return bytes.NewReader(b), true, nil
-}
-
-// GetIgnoreConfiguredEndpoints is used to determine when the configured
-// endpoints feature should be disabled.
-func (c EnvConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) {
-	if c.IgnoreConfiguredEndpoints == nil {
-		return false, false, nil
-	}
-
-	return *c.IgnoreConfiguredEndpoints, true, nil
-}
-
-func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) {
-	return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil
-}
-
-// GetServiceBaseEndpoint is used to retrieve the configured endpoint for a
-// service, keyed by its normalized SDK ID.
-func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
-	if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURL, normalizeEnv(sdkID))); endpt != "" {
-		return endpt, true, nil
-	}
-	return "", false, nil
-}
-
-func normalizeEnv(sdkID string) string {
-	upper := strings.ToUpper(sdkID)
-	return strings.ReplaceAll(upper, " ", "_")
-}
-
-// GetS3UseARNRegion returns whether to allow ARNs to direct the region
-// the S3 client's requests are sent to.
-func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
-	if c.S3UseARNRegion == nil {
-		return false, false, nil
-	}
-
-	return *c.S3UseARNRegion, true, nil
-}
-
-// GetS3DisableMultiRegionAccessPoints returns whether to disable multi-region access point
-// support for the S3 client.
-func (c EnvConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) {
-	if c.S3DisableMultiRegionAccessPoints == nil {
-		return false, false, nil
-	}
-
-	return *c.S3DisableMultiRegionAccessPoints, true, nil
-}
-
-// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
-// used for requests.
-func (c EnvConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
-	if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
-		return aws.DualStackEndpointStateUnset, false, nil
-	}
-
-	return c.UseDualStackEndpoint, true, nil
-}
-
-// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
-// used for requests.
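// Sketch (illustrative; not part of the vendored file): GetServiceBaseEndpoint
// above joins AWS_ENDPOINT_URL with the normalized SDK ID, so a service named
// "Elastic Beanstalk" is keyed as AWS_ENDPOINT_URL_ELASTIC_BEANSTALK:
//
//	os.Setenv("AWS_ENDPOINT_URL_S3", "http://localhost:4566")
//	// c.GetServiceBaseEndpoint(ctx, "S3") -> ("http://localhost:4566", true, nil)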
-func (c EnvConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { - if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { - return aws.FIPSEndpointStateUnset, false, nil - } - - return c.UseFIPSEndpoint, true, nil -} - -func setStringFromEnvVal(dst *string, keys []string) { - for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { - *dst = v - break - } - } -} - -func setIntFromEnvVal(dst *int, keys []string) error { - for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { - i, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("invalid value %s=%s, %w", k, v, err) - } - *dst = int(i) - break - } - } - - return nil -} - -func setBoolPtrFromEnvVal(dst **bool, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - - if *dst == nil { - *dst = new(bool) - } - - switch { - case strings.EqualFold(value, "false"): - **dst = false - case strings.EqualFold(value, "true"): - **dst = true - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true or false", - k, value) - } - break - } - - return nil -} - -func setInt64PtrFromEnvVal(dst **int64, keys []string, max int64) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - - v, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("invalid value for env var, %s=%s, need int64", k, value) - } else if v < 0 || v > max { - return fmt.Errorf("invalid range for env var min request compression size bytes %q, must be within 0 and 10485760 inclusively", v) - } - if *dst == nil { - *dst = new(int64) - } - - **dst = v - break - } - - return nil -} - -func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case strings.EqualFold(value, endpointDiscoveryDisabled): - *dst = aws.EndpointDiscoveryDisabled - case strings.EqualFold(value, endpointDiscoveryEnabled): - *dst = aws.EndpointDiscoveryEnabled - case strings.EqualFold(value, endpointDiscoveryAuto): - *dst = aws.EndpointDiscoveryAuto - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false or auto", - k, value) - } - } - return nil -} - -func setUseDualStackEndpointFromEnvVal(dst *aws.DualStackEndpointState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case strings.EqualFold(value, "true"): - *dst = aws.DualStackEndpointStateEnabled - case strings.EqualFold(value, "false"): - *dst = aws.DualStackEndpointStateDisabled - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false", - k, value) - } - } - return nil -} - -func setUseFIPSEndpointFromEnvVal(dst *aws.FIPSEndpointState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case strings.EqualFold(value, "true"): - *dst = aws.FIPSEndpointStateEnabled - case strings.EqualFold(value, "false"): - *dst = aws.FIPSEndpointStateDisabled - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false", - k, value) - } - } - return nil -} - -// GetEnableEndpointDiscovery returns resolved value for EnableEndpointDiscovery env variable setting. 
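// Sketch (hypothetical key; not part of the vendored file): the **bool setters
// above implement a tri-state. A nil pointer means "unset", distinct from an
// explicit false:
//
//	var flag *bool
//	_ = setBoolPtrFromEnvVal(&flag, []string{"MY_FLAG"})
//	// flag == nil    -> variable absent, keep the SDK default
//	// *flag == false -> explicitly disabled via MY_FLAG=false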
-func (c EnvConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) { - if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { - return aws.EndpointDiscoveryUnset, false, nil - } - - return c.EnableEndpointDiscovery, true, nil -} - -// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface. -func (c EnvConfig) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) { - if c.EC2IMDSClientEnableState == imds.ClientDefaultEnableState { - return imds.ClientDefaultEnableState, false, nil - } - - return c.EC2IMDSClientEnableState, true, nil -} - -// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. -func (c EnvConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { - if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { - return imds.EndpointModeStateUnset, false, nil - } - - return c.EC2IMDSEndpointMode, true, nil -} - -// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. -func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) { - if len(c.EC2IMDSEndpoint) == 0 { - return "", false, nil - } - - return c.EC2IMDSEndpoint, true, nil -} - -// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option -// resolver interface. -func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { - if c.EC2IMDSv1Disabled == nil { - return false, false - } - - return *c.EC2IMDSv1Disabled, true -} - -// GetS3DisableExpressAuth returns the configured value for -// [EnvConfig.S3DisableExpressAuth]. -func (c EnvConfig) GetS3DisableExpressAuth() (value, ok bool) { - if c.S3DisableExpressAuth == nil { - return false, false - } - - return *c.S3DisableExpressAuth, true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go deleted file mode 100644 index 654a7a77fb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go +++ /dev/null @@ -1,4 +0,0 @@ -package config - -//go:generate go run -tags codegen ./codegen -output=provider_assert_test.go -//go:generate gofmt -s -w ./ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go deleted file mode 100644 index 00ee204918..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
- -package config - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.27.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go deleted file mode 100644 index 06596c1b7c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go +++ /dev/null @@ -1,1114 +0,0 @@ -package config - -import ( - "context" - "io" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" - "github.com/aws/aws-sdk-go-v2/credentials/processcreds" - "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - smithybearer "github.com/aws/smithy-go/auth/bearer" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// LoadOptionsFunc is a type alias for LoadOptions functional option -type LoadOptionsFunc func(*LoadOptions) error - -// LoadOptions are discrete set of options that are valid for loading the -// configuration -type LoadOptions struct { - - // Region is the region to send requests to. - Region string - - // Credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // Token provider for authentication operations with bearer authentication. - BearerAuthTokenProvider smithybearer.TokenProvider - - // HTTPClient the SDK's API clients will use to invoke HTTP requests. - HTTPClient HTTPClient - - // EndpointResolver that can be used to provide or override an endpoint for - // the given service and region. - // - // See the `aws.EndpointResolver` documentation on usage. - // - // Deprecated: See EndpointResolverWithOptions - EndpointResolver aws.EndpointResolver - - // EndpointResolverWithOptions that can be used to provide or override an - // endpoint for the given service and region. - // - // See the `aws.EndpointResolverWithOptions` documentation on usage. - EndpointResolverWithOptions aws.EndpointResolverWithOptions - - // RetryMaxAttempts specifies the maximum number attempts an API client - // will call an operation that fails with a retryable error. - // - // This value will only be used if Retryer option is nil. - RetryMaxAttempts int - - // RetryMode specifies the retry model the API client will be created with. - // - // This value will only be used if Retryer option is nil. - RetryMode aws.RetryMode - - // Retryer is a function that provides a Retryer implementation. A Retryer - // guides how HTTP requests should be retried in case of recoverable - // failures. - // - // If not nil, RetryMaxAttempts, and RetryMode will be ignored. - Retryer func() aws.Retryer - - // APIOptions provides the set of middleware mutations modify how the API - // client requests will be handled. This is useful for adding additional - // tracing data to a request, or changing behavior of the SDK's client. - APIOptions []func(*middleware.Stack) error - - // Logger writer interface to write logging messages to. - Logger logging.Logger - - // ClientLogMode is used to configure the events that will be sent to the - // configured logger. This can be used to configure the logging of signing, - // retries, request, and responses of the SDK clients. 
- // - // See the ClientLogMode type documentation for the complete set of logging - // modes and available configuration. - ClientLogMode *aws.ClientLogMode - - // SharedConfigProfile is the profile to be used when loading the SharedConfig - SharedConfigProfile string - - // SharedConfigFiles is the slice of custom shared config files to use when - // loading the SharedConfig. A non-default profile used within config file - // must have name defined with prefix 'profile '. eg [profile xyz] - // indicates a profile with name 'xyz'. To read more on the format of the - // config file, please refer the documentation at - // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-config - // - // If duplicate profiles are provided within the same, or across multiple - // shared config files, the next parsed profile will override only the - // properties that conflict with the previously defined profile. Note that - // if duplicate profiles are provided within the SharedCredentialsFiles and - // SharedConfigFiles, the properties defined in shared credentials file - // take precedence. - SharedConfigFiles []string - - // SharedCredentialsFile is the slice of custom shared credentials files to - // use when loading the SharedConfig. The profile name used within - // credentials file must not prefix 'profile '. eg [xyz] indicates a - // profile with name 'xyz'. Profile declared as [profile xyz] will be - // ignored. To read more on the format of the credentials file, please - // refer the documentation at - // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-creds - // - // If duplicate profiles are provided with a same, or across multiple - // shared credentials files, the next parsed profile will override only - // properties that conflict with the previously defined profile. Note that - // if duplicate profiles are provided within the SharedCredentialsFiles and - // SharedConfigFiles, the properties defined in shared credentials file - // take precedence. 
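// Sketch of the naming rules described above (illustrative file contents; not
// part of the vendored file):
//
//	~/.aws/config:      [profile ci] -> selectable as profile "ci"
//	~/.aws/credentials: [ci]         -> selectable as profile "ci"
//	// A [profile ci] section in the credentials file would be ignored.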
- SharedCredentialsFiles []string - - // CustomCABundle is CA bundle PEM bytes reader - CustomCABundle io.Reader - - // DefaultRegion is the fall back region, used if a region was not resolved - // from other sources - DefaultRegion string - - // UseEC2IMDSRegion indicates if SDK should retrieve the region - // from the EC2 Metadata service - UseEC2IMDSRegion *UseEC2IMDSRegion - - // CredentialsCacheOptions is a function for setting the - // aws.CredentialsCacheOptions - CredentialsCacheOptions func(*aws.CredentialsCacheOptions) - - // BearerAuthTokenCacheOptions is a function for setting the smithy-go - // auth/bearer#TokenCacheOptions - BearerAuthTokenCacheOptions func(*smithybearer.TokenCacheOptions) - - // SSOTokenProviderOptions is a function for setting the - // credentials/ssocreds.SSOTokenProviderOptions - SSOTokenProviderOptions func(*ssocreds.SSOTokenProviderOptions) - - // ProcessCredentialOptions is a function for setting - // the processcreds.Options - ProcessCredentialOptions func(*processcreds.Options) - - // EC2RoleCredentialOptions is a function for setting - // the ec2rolecreds.Options - EC2RoleCredentialOptions func(*ec2rolecreds.Options) - - // EndpointCredentialOptions is a function for setting - // the endpointcreds.Options - EndpointCredentialOptions func(*endpointcreds.Options) - - // WebIdentityRoleCredentialOptions is a function for setting - // the stscreds.WebIdentityRoleOptions - WebIdentityRoleCredentialOptions func(*stscreds.WebIdentityRoleOptions) - - // AssumeRoleCredentialOptions is a function for setting the - // stscreds.AssumeRoleOptions - AssumeRoleCredentialOptions func(*stscreds.AssumeRoleOptions) - - // SSOProviderOptions is a function for setting - // the ssocreds.Options - SSOProviderOptions func(options *ssocreds.Options) - - // LogConfigurationWarnings when set to true, enables logging - // configuration warnings - LogConfigurationWarnings *bool - - // S3UseARNRegion specifies if the S3 service should allow ARNs to direct - // the region, the client's requests are sent to. - S3UseARNRegion *bool - - // S3DisableMultiRegionAccessPoints specifies if the S3 service should disable - // the S3 Multi-Region access points feature. - S3DisableMultiRegionAccessPoints *bool - - // EnableEndpointDiscovery specifies if endpoint discovery is enable for - // the client. - EnableEndpointDiscovery aws.EndpointDiscoveryEnableState - - // Specifies if the EC2 IMDS service client is enabled. - // - // AWS_EC2_METADATA_DISABLED=true - EC2IMDSClientEnableState imds.ClientEnableState - - // Specifies the EC2 Instance Metadata Service default endpoint selection - // mode (IPv4 or IPv6) - EC2IMDSEndpointMode imds.EndpointModeState - - // Specifies the EC2 Instance Metadata Service endpoint to use. If - // specified it overrides EC2IMDSEndpointMode. - EC2IMDSEndpoint string - - // Specifies that SDK clients must resolve a dual-stack endpoint for - // services. - UseDualStackEndpoint aws.DualStackEndpointState - - // Specifies that SDK clients must resolve a FIPS endpoint for - // services. - UseFIPSEndpoint aws.FIPSEndpointState - - // Specifies the SDK configuration mode for defaults. 
- DefaultsModeOptions DefaultsModeOptions - - // The sdk app ID retrieved from env var or shared config to be added to request user agent header - AppID string - - // Specifies whether an operation request could be compressed - DisableRequestCompression *bool - - // The inclusive min bytes of a request body that could be compressed - RequestMinCompressSizeBytes *int64 - - // Whether S3 Express auth is disabled. - S3DisableExpressAuth *bool -} - -func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { - if len(o.DefaultsModeOptions.Mode) == 0 { - return "", false, nil - } - return o.DefaultsModeOptions.Mode, true, nil -} - -// GetRetryMaxAttempts returns the RetryMaxAttempts if specified in the -// LoadOptions and not 0. -func (o LoadOptions) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { - if o.RetryMaxAttempts == 0 { - return 0, false, nil - } - return o.RetryMaxAttempts, true, nil -} - -// GetRetryMode returns the RetryMode specified in the LoadOptions. -func (o LoadOptions) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { - if len(o.RetryMode) == 0 { - return "", false, nil - } - return o.RetryMode, true, nil -} - -func (o LoadOptions) getDefaultsModeIMDSClient(ctx context.Context) (*imds.Client, bool, error) { - if o.DefaultsModeOptions.IMDSClient == nil { - return nil, false, nil - } - return o.DefaultsModeOptions.IMDSClient, true, nil -} - -// getRegion returns Region from config's LoadOptions -func (o LoadOptions) getRegion(ctx context.Context) (string, bool, error) { - if len(o.Region) == 0 { - return "", false, nil - } - - return o.Region, true, nil -} - -// getAppID returns AppID from config's LoadOptions -func (o LoadOptions) getAppID(ctx context.Context) (string, bool, error) { - return o.AppID, len(o.AppID) > 0, nil -} - -// getDisableRequestCompression returns DisableRequestCompression from config's LoadOptions -func (o LoadOptions) getDisableRequestCompression(ctx context.Context) (bool, bool, error) { - if o.DisableRequestCompression == nil { - return false, false, nil - } - return *o.DisableRequestCompression, true, nil -} - -// getRequestMinCompressSizeBytes returns RequestMinCompressSizeBytes from config's LoadOptions -func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { - if o.RequestMinCompressSizeBytes == nil { - return 0, false, nil - } - return *o.RequestMinCompressSizeBytes, true, nil -} - -// WithRegion is a helper function to construct functional options -// that sets Region on config's LoadOptions. Setting the region to -// an empty string, will result in the region value being ignored. -// If multiple WithRegion calls are made, the last call overrides -// the previous call values. -func WithRegion(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Region = v - return nil - } -} - -// WithAppID is a helper function to construct functional options -// that sets AppID on config's LoadOptions. -func WithAppID(ID string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.AppID = ID - return nil - } -} - -// WithDisableRequestCompression is a helper function to construct functional options -// that sets DisableRequestCompression on config's LoadOptions. 
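// Sketch (illustrative combination; not part of the vendored file): the With*
// helpers below follow the functional-options pattern, applied like so:
//
//	cfg, err := config.LoadDefaultConfig(ctx,
//		config.WithRegion("eu-central-1"),
//		config.WithSharedConfigProfile("ci"),
//	)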
-func WithDisableRequestCompression(DisableRequestCompression *bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - if DisableRequestCompression == nil { - return nil - } - o.DisableRequestCompression = DisableRequestCompression - return nil - } -} - -// WithRequestMinCompressSizeBytes is a helper function to construct functional options -// that sets RequestMinCompressSizeBytes on config's LoadOptions. -func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOptionsFunc { - return func(o *LoadOptions) error { - if RequestMinCompressSizeBytes == nil { - return nil - } - o.RequestMinCompressSizeBytes = RequestMinCompressSizeBytes - return nil - } -} - -// getDefaultRegion returns DefaultRegion from config's LoadOptions -func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) { - if len(o.DefaultRegion) == 0 { - return "", false, nil - } - - return o.DefaultRegion, true, nil -} - -// WithDefaultRegion is a helper function to construct functional options -// that sets a DefaultRegion on config's LoadOptions. Setting the default -// region to an empty string, will result in the default region value -// being ignored. If multiple WithDefaultRegion calls are made, the last -// call overrides the previous call values. Note that both WithRegion and -// WithEC2IMDSRegion call takes precedence over WithDefaultRegion call -// when resolving region. -func WithDefaultRegion(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.DefaultRegion = v - return nil - } -} - -// getSharedConfigProfile returns SharedConfigProfile from config's LoadOptions -func (o LoadOptions) getSharedConfigProfile(ctx context.Context) (string, bool, error) { - if len(o.SharedConfigProfile) == 0 { - return "", false, nil - } - - return o.SharedConfigProfile, true, nil -} - -// WithSharedConfigProfile is a helper function to construct functional options -// that sets SharedConfigProfile on config's LoadOptions. Setting the shared -// config profile to an empty string, will result in the shared config profile -// value being ignored. -// If multiple WithSharedConfigProfile calls are made, the last call overrides -// the previous call values. -func WithSharedConfigProfile(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SharedConfigProfile = v - return nil - } -} - -// getSharedConfigFiles returns SharedConfigFiles set on config's LoadOptions -func (o LoadOptions) getSharedConfigFiles(ctx context.Context) ([]string, bool, error) { - if o.SharedConfigFiles == nil { - return nil, false, nil - } - - return o.SharedConfigFiles, true, nil -} - -// WithSharedConfigFiles is a helper function to construct functional options -// that sets slice of SharedConfigFiles on config's LoadOptions. -// Setting the shared config files to an nil string slice, will result in the -// shared config files value being ignored. -// If multiple WithSharedConfigFiles calls are made, the last call overrides -// the previous call values. 
-func WithSharedConfigFiles(v []string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SharedConfigFiles = v - return nil - } -} - -// getSharedCredentialsFiles returns SharedCredentialsFiles set on config's LoadOptions -func (o LoadOptions) getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) { - if o.SharedCredentialsFiles == nil { - return nil, false, nil - } - - return o.SharedCredentialsFiles, true, nil -} - -// WithSharedCredentialsFiles is a helper function to construct functional options -// that sets slice of SharedCredentialsFiles on config's LoadOptions. -// Setting the shared credentials files to an nil string slice, will result in the -// shared credentials files value being ignored. -// If multiple WithSharedCredentialsFiles calls are made, the last call overrides -// the previous call values. -func WithSharedCredentialsFiles(v []string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SharedCredentialsFiles = v - return nil - } -} - -// getCustomCABundle returns CustomCABundle from LoadOptions -func (o LoadOptions) getCustomCABundle(ctx context.Context) (io.Reader, bool, error) { - if o.CustomCABundle == nil { - return nil, false, nil - } - - return o.CustomCABundle, true, nil -} - -// WithCustomCABundle is a helper function to construct functional options -// that sets CustomCABundle on config's LoadOptions. Setting the custom CA Bundle -// to nil will result in custom CA Bundle value being ignored. -// If multiple WithCustomCABundle calls are made, the last call overrides the -// previous call values. -func WithCustomCABundle(v io.Reader) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.CustomCABundle = v - return nil - } -} - -// UseEC2IMDSRegion provides a regionProvider that retrieves the region -// from the EC2 Metadata service. -type UseEC2IMDSRegion struct { - // If unset will default to generic EC2 IMDS client. - Client *imds.Client -} - -// getRegion attempts to retrieve the region from EC2 Metadata service. -func (p *UseEC2IMDSRegion) getRegion(ctx context.Context) (string, bool, error) { - if ctx == nil { - ctx = context.Background() - } - - client := p.Client - if client == nil { - client = imds.New(imds.Options{}) - } - - result, err := client.GetRegion(ctx, nil) - if err != nil { - return "", false, err - } - if len(result.Region) != 0 { - return result.Region, true, nil - } - return "", false, nil -} - -// getEC2IMDSRegion returns the value of EC2 IMDS region. -func (o LoadOptions) getEC2IMDSRegion(ctx context.Context) (string, bool, error) { - if o.UseEC2IMDSRegion == nil { - return "", false, nil - } - - return o.UseEC2IMDSRegion.getRegion(ctx) -} - -// WithEC2IMDSRegion is a helper function to construct functional options -// that enables resolving EC2IMDS region. The function takes -// in a UseEC2IMDSRegion functional option, and can be used to set the -// EC2IMDS client which will be used to resolve EC2IMDSRegion. -// If no functional option is provided, an EC2IMDS client is built and used -// by the resolver. If multiple WithEC2IMDSRegion calls are made, the last -// call overrides the previous call values. Note that the WithRegion calls takes -// precedence over WithEC2IMDSRegion when resolving region. 
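// Sketch (illustrative; not part of the vendored file): opting in to the EC2
// IMDS region lookup described above, meaningful only when running on EC2:
//
//	cfg, err := config.LoadDefaultConfig(ctx, config.WithEC2IMDSRegion())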
-func WithEC2IMDSRegion(fnOpts ...func(o *UseEC2IMDSRegion)) LoadOptionsFunc {
-	return func(o *LoadOptions) error {
-		o.UseEC2IMDSRegion = &UseEC2IMDSRegion{}
-
-		for _, fn := range fnOpts {
-			fn(o.UseEC2IMDSRegion)
-		}
-		return nil
-	}
-}
-
-// getCredentialsProvider returns the credentials value
-func (o LoadOptions) getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) {
-	if o.Credentials == nil {
-		return nil, false, nil
-	}
-
-	return o.Credentials, true, nil
-}
-
-// WithCredentialsProvider is a helper function to construct functional options
-// that sets the Credentials provider value on config's LoadOptions. If the credentials
-// provider is set to nil, the credentials provider value will be ignored.
-// If multiple WithCredentialsProvider calls are made, the last call overrides
-// the previous call values.
-func WithCredentialsProvider(v aws.CredentialsProvider) LoadOptionsFunc {
-	return func(o *LoadOptions) error {
-		o.Credentials = v
-		return nil
-	}
-}
-
-// getCredentialsCacheOptions returns the wrapped function to set aws.CredentialsCacheOptions
-func (o LoadOptions) getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) {
-	if o.CredentialsCacheOptions == nil {
-		return nil, false, nil
-	}
-
-	return o.CredentialsCacheOptions, true, nil
-}
-
-// WithCredentialsCacheOptions is a helper function to construct functional
-// options that sets a function to modify the aws.CredentialsCacheOptions the
-// aws.CredentialsCache will be configured with, if the CredentialsCache is used
-// by the configuration loader.
-//
-// If multiple WithCredentialsCacheOptions calls are made, the last call
-// overrides the previous call values.
-func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptionsFunc {
-	return func(o *LoadOptions) error {
-		o.CredentialsCacheOptions = v
-		return nil
-	}
-}
-
-// getBearerAuthTokenProvider returns the bearer auth token provider value
-func (o LoadOptions) getBearerAuthTokenProvider(ctx context.Context) (smithybearer.TokenProvider, bool, error) {
-	if o.BearerAuthTokenProvider == nil {
-		return nil, false, nil
-	}
-
-	return o.BearerAuthTokenProvider, true, nil
-}
-
-// WithBearerAuthTokenProvider is a helper function to construct functional options
-// that sets the bearer auth token provider value on config's LoadOptions. If the
-// provider is set to nil, its value will be ignored.
-// If multiple WithBearerAuthTokenProvider calls are made, the last call overrides
-// the previous call values.
-func WithBearerAuthTokenProvider(v smithybearer.TokenProvider) LoadOptionsFunc {
-	return func(o *LoadOptions) error {
-		o.BearerAuthTokenProvider = v
-		return nil
-	}
-}
-
-// getBearerAuthTokenCacheOptions returns the wrapped function to set smithybearer.TokenCacheOptions
-func (o LoadOptions) getBearerAuthTokenCacheOptions(ctx context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) {
-	if o.BearerAuthTokenCacheOptions == nil {
-		return nil, false, nil
-	}
-
-	return o.BearerAuthTokenCacheOptions, true, nil
-}
-
-// WithBearerAuthTokenCacheOptions is a helper function to construct functional options
-// that sets a function to modify the TokenCacheOptions the smithy-go
-// auth/bearer#TokenCache will be configured with, if the TokenCache is used by
-// the configuration loader.
-//
-// If multiple WithBearerAuthTokenCacheOptions calls are made, the last call overrides
-// the previous call values.
-func WithBearerAuthTokenCacheOptions(v func(*smithybearer.TokenCacheOptions)) LoadOptionsFunc {
-	return func(o *LoadOptions) error {
-		o.BearerAuthTokenCacheOptions = v
-		return nil
-	}
-}
-
-// getSSOTokenProviderOptions returns the wrapped function to set ssocreds.SSOTokenProviderOptions
-func (o LoadOptions) getSSOTokenProviderOptions(ctx context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) {
-	if o.SSOTokenProviderOptions == nil {
-		return nil, false, nil
-	}
-
-	return o.SSOTokenProviderOptions, true, nil
-}
-
-// WithSSOTokenProviderOptions is a helper function to construct functional
-// options that sets a function to modify the SSOTokenProviderOptions the SDK's
-// credentials/ssocreds#SSOProvider will be configured with, if the
-// SSOTokenProvider is used by the configuration loader.
-//
-// If multiple WithSSOTokenProviderOptions calls are made, the last call overrides
-// the previous call values.
-func WithSSOTokenProviderOptions(v func(*ssocreds.SSOTokenProviderOptions)) LoadOptionsFunc {
-	return func(o *LoadOptions) error {
-		o.SSOTokenProviderOptions = v
-		return nil
-	}
-}
-
-// getProcessCredentialOptions returns the wrapped function to set processcreds.Options
-func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) {
-	if o.ProcessCredentialOptions == nil {
-		return nil, false, nil
-	}
-
-	return o.ProcessCredentialOptions, true, nil
-}
-
-// WithProcessCredentialOptions is a helper function to construct functional options
-// that sets a function to use processcreds.Options on config's LoadOptions.
-// If process credential options is set to nil, the process credential value will
-// be ignored. If multiple WithProcessCredentialOptions calls are made, the last call
-// overrides the previous call values.
-func WithProcessCredentialOptions(v func(*processcreds.Options)) LoadOptionsFunc {
-	return func(o *LoadOptions) error {
-		o.ProcessCredentialOptions = v
-		return nil
-	}
-}
-
-// getEC2RoleCredentialOptions returns the wrapped function to set the ec2rolecreds.Options
-func (o LoadOptions) getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) {
-	if o.EC2RoleCredentialOptions == nil {
-		return nil, false, nil
-	}
-
-	return o.EC2RoleCredentialOptions, true, nil
-}
-
-// WithEC2RoleCredentialOptions is a helper function to construct functional options
-// that sets a function to use ec2rolecreds.Options on config's LoadOptions. If
-// EC2 role credential options is set to nil, the EC2 role credential options value
-// will be ignored. If multiple WithEC2RoleCredentialOptions calls are made,
-// the last call overrides the previous call values.
-func WithEC2RoleCredentialOptions(v func(*ec2rolecreds.Options)) LoadOptionsFunc {
-	return func(o *LoadOptions) error {
-		o.EC2RoleCredentialOptions = v
-		return nil
-	}
-}
-
-// getEndpointCredentialOptions returns the wrapped function to set endpointcreds.Options
-func (o LoadOptions) getEndpointCredentialOptions(context.Context) (func(*endpointcreds.Options), bool, error) {
-	if o.EndpointCredentialOptions == nil {
-		return nil, false, nil
-	}
-
-	return o.EndpointCredentialOptions, true, nil
-}
-
-// WithEndpointCredentialOptions is a helper function to construct functional options
-// that sets a function to use endpointcreds.Options on config's LoadOptions. If
-// endpoint credential options is set to nil, the endpoint credential options
-// value will be ignored.
If multiple WithEndpointCredentialOptions calls are made, -// the last call overrides the previous call values. -func WithEndpointCredentialOptions(v func(*endpointcreds.Options)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EndpointCredentialOptions = v - return nil - } -} - -// getWebIdentityRoleCredentialOptions returns the wrapped function -func (o LoadOptions) getWebIdentityRoleCredentialOptions(context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) { - if o.WebIdentityRoleCredentialOptions == nil { - return nil, false, nil - } - - return o.WebIdentityRoleCredentialOptions, true, nil -} - -// WithWebIdentityRoleCredentialOptions is a helper function to construct -// functional options that sets a function to use stscreds.WebIdentityRoleOptions -// on config's LoadOptions. If web identity role credentials options is set to nil, -// the web identity role credentials value will be ignored. If multiple -// WithWebIdentityRoleCredentialOptions calls are made, the last call -// overrides the previous call values. -func WithWebIdentityRoleCredentialOptions(v func(*stscreds.WebIdentityRoleOptions)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.WebIdentityRoleCredentialOptions = v - return nil - } -} - -// getAssumeRoleCredentialOptions returns AssumeRoleCredentialOptions from LoadOptions -func (o LoadOptions) getAssumeRoleCredentialOptions(context.Context) (func(options *stscreds.AssumeRoleOptions), bool, error) { - if o.AssumeRoleCredentialOptions == nil { - return nil, false, nil - } - - return o.AssumeRoleCredentialOptions, true, nil -} - -// WithAssumeRoleCredentialOptions is a helper function to construct -// functional options that sets a function to use stscreds.AssumeRoleOptions -// on config's LoadOptions. If assume role credentials options is set to nil, -// the assume role credentials value will be ignored. If multiple -// WithAssumeRoleCredentialOptions calls are made, the last call overrides -// the previous call values. -func WithAssumeRoleCredentialOptions(v func(*stscreds.AssumeRoleOptions)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.AssumeRoleCredentialOptions = v - return nil - } -} - -func (o LoadOptions) getHTTPClient(ctx context.Context) (HTTPClient, bool, error) { - if o.HTTPClient == nil { - return nil, false, nil - } - - return o.HTTPClient, true, nil -} - -// WithHTTPClient is a helper function to construct functional options -// that sets HTTPClient on LoadOptions. If HTTPClient is set to nil, -// the HTTPClient value will be ignored. -// If multiple WithHTTPClient calls are made, the last call overrides -// the previous call values. -func WithHTTPClient(v HTTPClient) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.HTTPClient = v - return nil - } -} - -func (o LoadOptions) getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) { - if o.APIOptions == nil { - return nil, false, nil - } - - return o.APIOptions, true, nil -} - -// WithAPIOptions is a helper function to construct functional options -// that sets APIOptions on LoadOptions. If APIOptions is set to nil, the -// APIOptions value is ignored. If multiple WithAPIOptions calls are -// made, the last call overrides the previous call values. -func WithAPIOptions(v []func(*middleware.Stack) error) LoadOptionsFunc { - return func(o *LoadOptions) error { - if v == nil { - return nil - } - - o.APIOptions = append(o.APIOptions, v...) 
-		return nil
-	}
-}
-
-func (o LoadOptions) getRetryMaxAttempts(ctx context.Context) (int, bool, error) {
-	if o.RetryMaxAttempts == 0 {
-		return 0, false, nil
-	}
-
-	return o.RetryMaxAttempts, true, nil
-}
-
-// WithRetryMaxAttempts is a helper function to construct functional options that sets
-// RetryMaxAttempts on LoadOptions. If RetryMaxAttempts is unset, the RetryMaxAttempts value is
-// ignored. If multiple WithRetryMaxAttempts calls are made, the last call overrides
-// the previous call values.
-//
-// Will be ignored if LoadOptions.Retryer or WithRetryer are used.
-func WithRetryMaxAttempts(v int) LoadOptionsFunc {
-	return func(o *LoadOptions) error {
-		o.RetryMaxAttempts = v
-		return nil
-	}
-}
-
-func (o LoadOptions) getRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
-	if o.RetryMode == "" {
-		return "", false, nil
-	}
-
-	return o.RetryMode, true, nil
-}
-
-// WithRetryMode is a helper function to construct functional options that sets
-// RetryMode on LoadOptions. If RetryMode is unset, the RetryMode value is
-// ignored. If multiple WithRetryMode calls are made, the last call overrides
-// the previous call values.
-//
-// Will be ignored if LoadOptions.Retryer or WithRetryer are used.
-func WithRetryMode(v aws.RetryMode) LoadOptionsFunc {
-	return func(o *LoadOptions) error {
-		o.RetryMode = v
-		return nil
-	}
-}
-
-func (o LoadOptions) getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) {
-	if o.Retryer == nil {
-		return nil, false, nil
-	}
-
-	return o.Retryer, true, nil
-}
-
-// WithRetryer is a helper function to construct functional options
-// that sets Retryer on LoadOptions. If Retryer is set to nil, the
-// Retryer value is ignored. If multiple WithRetryer calls are
-// made, the last call overrides the previous call values.
-func WithRetryer(v func() aws.Retryer) LoadOptionsFunc {
-	return func(o *LoadOptions) error {
-		o.Retryer = v
-		return nil
-	}
-}
-
-func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) {
-	if o.EndpointResolver == nil {
-		return nil, false, nil
-	}
-
-	return o.EndpointResolver, true, nil
-}
-
-// WithEndpointResolver is a helper function to construct functional options
-// that sets the EndpointResolver on LoadOptions. If the EndpointResolver is set to nil,
-// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls
-// are made, the last call overrides the previous call values.
-//
-// Deprecated: See WithEndpointResolverWithOptions
-func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc {
-	return func(o *LoadOptions) error {
-		o.EndpointResolver = v
-		return nil
-	}
-}
-
-func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) {
-	if o.EndpointResolverWithOptions == nil {
-		return nil, false, nil
-	}
-
-	return o.EndpointResolverWithOptions, true, nil
-}
-
-// WithEndpointResolverWithOptions is a helper function to construct functional options
-// that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions
-// is set to nil, its value is ignored. If multiple WithEndpointResolverWithOptions calls
-// are made, the last call overrides the previous call values.
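// Sketch (illustrative values; not part of the vendored file): wiring the
// retry knobs above:
//
//	cfg, err := config.LoadDefaultConfig(ctx,
//		config.WithRetryMaxAttempts(5),
//		config.WithRetryMode(aws.RetryModeAdaptive),
//	)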
-func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EndpointResolverWithOptions = v - return nil - } -} - -func (o LoadOptions) getLogger(ctx context.Context) (logging.Logger, bool, error) { - if o.Logger == nil { - return nil, false, nil - } - - return o.Logger, true, nil -} - -// WithLogger is a helper function to construct functional options -// that sets Logger on LoadOptions. If Logger is set to nil, the -// Logger value will be ignored. If multiple WithLogger calls are made, -// the last call overrides the previous call values. -func WithLogger(v logging.Logger) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Logger = v - return nil - } -} - -func (o LoadOptions) getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) { - if o.ClientLogMode == nil { - return 0, false, nil - } - - return *o.ClientLogMode, true, nil -} - -// WithClientLogMode is a helper function to construct functional options -// that sets client log mode on LoadOptions. If client log mode is set to nil, -// the client log mode value will be ignored. If multiple WithClientLogMode calls are made, -// the last call overrides the previous call values. -func WithClientLogMode(v aws.ClientLogMode) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.ClientLogMode = &v - return nil - } -} - -func (o LoadOptions) getLogConfigurationWarnings(ctx context.Context) (v bool, found bool, err error) { - if o.LogConfigurationWarnings == nil { - return false, false, nil - } - return *o.LogConfigurationWarnings, true, nil -} - -// WithLogConfigurationWarnings is a helper function to construct -// functional options that can be used to set LogConfigurationWarnings -// on LoadOptions. -// -// If multiple WithLogConfigurationWarnings calls are made, the last call -// overrides the previous call values. -func WithLogConfigurationWarnings(v bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.LogConfigurationWarnings = &v - return nil - } -} - -// GetS3UseARNRegion returns whether to allow ARNs to direct the region -// the S3 client's requests are sent to. -func (o LoadOptions) GetS3UseARNRegion(ctx context.Context) (v bool, found bool, err error) { - if o.S3UseARNRegion == nil { - return false, false, nil - } - return *o.S3UseARNRegion, true, nil -} - -// WithS3UseARNRegion is a helper function to construct functional options -// that can be used to set S3UseARNRegion on LoadOptions. -// If multiple WithS3UseARNRegion calls are made, the last call overrides -// the previous call values. -func WithS3UseARNRegion(v bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.S3UseARNRegion = &v - return nil - } -} - -// GetS3DisableMultiRegionAccessPoints returns whether to disable -// the S3 multi-region access points feature. -func (o LoadOptions) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (v bool, found bool, err error) { - if o.S3DisableMultiRegionAccessPoints == nil { - return false, false, nil - } - return *o.S3DisableMultiRegionAccessPoints, true, nil -} - -// WithS3DisableMultiRegionAccessPoints is a helper function to construct functional options -// that can be used to set S3DisableMultiRegionAccessPoints on LoadOptions. -// If multiple WithS3DisableMultiRegionAccessPoints calls are made, the last call overrides -// the previous call values. 
-func WithS3DisableMultiRegionAccessPoints(v bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.S3DisableMultiRegionAccessPoints = &v - return nil - } -} - -// GetEnableEndpointDiscovery returns if the EnableEndpointDiscovery flag is set. -func (o LoadOptions) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) { - if o.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { - return aws.EndpointDiscoveryUnset, false, nil - } - return o.EnableEndpointDiscovery, true, nil -} - -// WithEndpointDiscovery is a helper function to construct functional options -// that can be used to enable endpoint discovery on LoadOptions for supported clients. -// If multiple WithEndpointDiscovery calls are made, the last call overrides -// the previous call values. -func WithEndpointDiscovery(v aws.EndpointDiscoveryEnableState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EnableEndpointDiscovery = v - return nil - } -} - -// getSSOProviderOptions returns AssumeRoleCredentialOptions from LoadOptions -func (o LoadOptions) getSSOProviderOptions(context.Context) (func(options *ssocreds.Options), bool, error) { - if o.SSOProviderOptions == nil { - return nil, false, nil - } - - return o.SSOProviderOptions, true, nil -} - -// WithSSOProviderOptions is a helper function to construct -// functional options that sets a function to use ssocreds.Options -// on config's LoadOptions. If the SSO credential provider options is set to nil, -// the sso provider options value will be ignored. If multiple -// WithSSOProviderOptions calls are made, the last call overrides -// the previous call values. -func WithSSOProviderOptions(v func(*ssocreds.Options)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SSOProviderOptions = v - return nil - } -} - -// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface. -func (o LoadOptions) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) { - if o.EC2IMDSClientEnableState == imds.ClientDefaultEnableState { - return imds.ClientDefaultEnableState, false, nil - } - - return o.EC2IMDSClientEnableState, true, nil -} - -// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. -func (o LoadOptions) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { - if o.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { - return imds.EndpointModeStateUnset, false, nil - } - - return o.EC2IMDSEndpointMode, true, nil -} - -// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. -func (o LoadOptions) GetEC2IMDSEndpoint() (string, bool, error) { - if len(o.EC2IMDSEndpoint) == 0 { - return "", false, nil - } - - return o.EC2IMDSEndpoint, true, nil -} - -// WithEC2IMDSClientEnableState is a helper function to construct functional options that sets the EC2IMDSClientEnableState. -func WithEC2IMDSClientEnableState(v imds.ClientEnableState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EC2IMDSClientEnableState = v - return nil - } -} - -// WithEC2IMDSEndpointMode is a helper function to construct functional options that sets the EC2IMDSEndpointMode. -func WithEC2IMDSEndpointMode(v imds.EndpointModeState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EC2IMDSEndpointMode = v - return nil - } -} - -// WithEC2IMDSEndpoint is a helper function to construct functional options that sets the EC2IMDSEndpoint. 
-func WithEC2IMDSEndpoint(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EC2IMDSEndpoint = v - return nil - } -} - -// WithUseDualStackEndpoint is a helper function to construct -// functional options that can be used to set UseDualStackEndpoint on LoadOptions. -func WithUseDualStackEndpoint(v aws.DualStackEndpointState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.UseDualStackEndpoint = v - return nil - } -} - -// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be -// used for requests. -func (o LoadOptions) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { - if o.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { - return aws.DualStackEndpointStateUnset, false, nil - } - return o.UseDualStackEndpoint, true, nil -} - -// WithUseFIPSEndpoint is a helper function to construct -// functional options that can be used to set UseFIPSEndpoint on LoadOptions. -func WithUseFIPSEndpoint(v aws.FIPSEndpointState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.UseFIPSEndpoint = v - return nil - } -} - -// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be -// used for requests. -func (o LoadOptions) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { - if o.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { - return aws.FIPSEndpointStateUnset, false, nil - } - return o.UseFIPSEndpoint, true, nil -} - -// WithDefaultsMode sets the SDK defaults configuration mode to the value provided. -// -// Zero or more functional options can be provided to provide configuration options for performing -// environment discovery when using aws.DefaultsModeAuto. -func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsModeOptions)) LoadOptionsFunc { - do := DefaultsModeOptions{ - Mode: mode, - } - for _, fn := range optFns { - fn(&do) - } - return func(options *LoadOptions) error { - options.DefaultsModeOptions = do - return nil - } -} - -// GetS3DisableExpressAuth returns the configured value for -// [EnvConfig.S3DisableExpressAuth]. -func (o LoadOptions) GetS3DisableExpressAuth() (value, ok bool) { - if o.S3DisableExpressAuth == nil { - return false, false - } - - return *o.S3DisableExpressAuth, true -} - -// WithS3DisableExpressAuth sets [LoadOptions.S3DisableExpressAuth] -// to the value provided. 
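WithUseDualStackEndpoint, WithUseFIPSEndpoint, and WithDefaultsMode compose the same way. A sketch that opts a config into dual-stack plus FIPS endpoints and automatic defaults-mode detection, using state constants from the aws package:

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO(),
            config.WithUseDualStackEndpoint(aws.DualStackEndpointStateEnabled),
            config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
            // DefaultsModeAuto triggers the IMDS-backed environment discovery
            // handled by resolveDefaultsModeOptions later in this diff.
            config.WithDefaultsMode(aws.DefaultsModeAuto),
        )
        if err != nil {
            log.Fatal(err)
        }
        _ = cfg
    }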
-func WithS3DisableExpressAuth(v bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.S3DisableExpressAuth = &v - return nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go deleted file mode 100644 index b629137c82..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go +++ /dev/null @@ -1,51 +0,0 @@ -package config - -import ( - "fmt" - "net" - "net/url" -) - -var lookupHostFn = net.LookupHost - -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil - } - - // Host is not an ip, perform lookup - addrs, err := lookupHostFn(host) - if err != nil { - return false, err - } - if len(addrs) == 0 { - return false, fmt.Errorf("no addrs found for host, %s", host) - } - - for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { - return false, nil - } - } - - return true, nil -} - -func validateLocalURL(v string) error { - u, err := url.Parse(v) - if err != nil { - return err - } - - host := u.Hostname() - if len(host) == 0 { - return fmt.Errorf("unable to parse host from local HTTP cred provider URL") - } else if isLoopback, err := isLoopbackHost(host); err != nil { - return fmt.Errorf("failed to resolve host %q, %v", host, err) - } else if !isLoopback { - return fmt.Errorf("invalid endpoint host, %q, only host resolving to loopback addresses are allowed", host) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go deleted file mode 100644 index 13745fc98f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ /dev/null @@ -1,704 +0,0 @@ -package config - -import ( - "context" - "io" - "net/http" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" - "github.com/aws/aws-sdk-go-v2/credentials/processcreds" - "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - smithybearer "github.com/aws/smithy-go/auth/bearer" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// sharedConfigProfileProvider provides access to the shared config profile -// name external configuration value. -type sharedConfigProfileProvider interface { - getSharedConfigProfile(ctx context.Context) (string, bool, error) -} - -// getSharedConfigProfile searches the configs for a sharedConfigProfileProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getSharedConfigProfile(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(sharedConfigProfileProvider); ok { - value, found, err = p.getSharedConfigProfile(ctx) - if err != nil || found { - break - } - } - } - return -} - -// sharedConfigFilesProvider provides access to the shared config filesnames -// external configuration value. -type sharedConfigFilesProvider interface { - getSharedConfigFiles(ctx context.Context) ([]string, bool, error) -} - -// getSharedConfigFiles searches the configs for a sharedConfigFilesProvider -// and returns the value if found. 
Returns an error if a provider fails before a -// value is found. -func getSharedConfigFiles(ctx context.Context, configs configs) (value []string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(sharedConfigFilesProvider); ok { - value, found, err = p.getSharedConfigFiles(ctx) - if err != nil || found { - break - } - } - } - - return -} - -// sharedCredentialsFilesProvider provides access to the shared credentials filesnames -// external configuration value. -type sharedCredentialsFilesProvider interface { - getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) -} - -// getSharedCredentialsFiles searches the configs for a sharedCredentialsFilesProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getSharedCredentialsFiles(ctx context.Context, configs configs) (value []string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(sharedCredentialsFilesProvider); ok { - value, found, err = p.getSharedCredentialsFiles(ctx) - if err != nil || found { - break - } - } - } - - return -} - -// customCABundleProvider provides access to the custom CA bundle PEM bytes. -type customCABundleProvider interface { - getCustomCABundle(ctx context.Context) (io.Reader, bool, error) -} - -// getCustomCABundle searches the configs for a customCABundleProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getCustomCABundle(ctx context.Context, configs configs) (value io.Reader, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(customCABundleProvider); ok { - value, found, err = p.getCustomCABundle(ctx) - if err != nil || found { - break - } - } - } - - return -} - -// regionProvider provides access to the region external configuration value. -type regionProvider interface { - getRegion(ctx context.Context) (string, bool, error) -} - -// getRegion searches the configs for a regionProvider and returns the value -// if found. Returns an error if a provider fails before a value is found. -func getRegion(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(regionProvider); ok { - value, found, err = p.getRegion(ctx) - if err != nil || found { - break - } - } - } - return -} - -// IgnoreConfiguredEndpointsProvider is needed to search for all providers -// that provide a flag to disable configured endpoints. -type IgnoreConfiguredEndpointsProvider interface { - GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) -} - -// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured -// endpoints feature. 
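Every getter in this deleted file repeats one idiom: walk the ordered config sources, type-assert each against a small unexported interface, and stop at the first source that answers or errors. A stripped-down, self-contained sketch of that idiom (the context parameter is dropped for brevity, and the names are illustrative, not the SDK's):

    package main

    import "fmt"

    type regionProvider interface {
        getRegion() (string, bool, error)
    }

    type staticRegion string

    func (s staticRegion) getRegion() (string, bool, error) {
        return string(s), true, nil
    }

    func getRegion(sources []interface{}) (value string, found bool, err error) {
        for _, src := range sources {
            if p, ok := src.(regionProvider); ok {
                value, found, err = p.getRegion()
                if err != nil || found {
                    break // first answering source wins
                }
            }
        }
        return
    }

    func main() {
        // Sources that do not implement the interface are skipped silently.
        region, ok, _ := getRegion([]interface{}{42, staticRegion("us-west-2")})
        fmt.Println(region, ok) // us-west-2 true
    }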
-func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { - value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) - if err != nil || found { - break - } - } - } - return -} - -type baseEndpointProvider interface { - getBaseEndpoint(ctx context.Context) (string, bool, error) -} - -func getBaseEndpoint(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(baseEndpointProvider); ok { - value, found, err = p.getBaseEndpoint(ctx) - if err != nil || found { - break - } - } - } - return -} - -type servicesObjectProvider interface { - getServicesObject(ctx context.Context) (map[string]map[string]string, bool, error) -} - -func getServicesObject(ctx context.Context, configs configs) (value map[string]map[string]string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(servicesObjectProvider); ok { - value, found, err = p.getServicesObject(ctx) - if err != nil || found { - break - } - } - } - return -} - -// appIDProvider provides access to the sdk app ID value -type appIDProvider interface { - getAppID(ctx context.Context) (string, bool, error) -} - -func getAppID(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(appIDProvider); ok { - value, found, err = p.getAppID(ctx) - if err != nil || found { - break - } - } - } - return -} - -// disableRequestCompressionProvider provides access to the DisableRequestCompression -type disableRequestCompressionProvider interface { - getDisableRequestCompression(context.Context) (bool, bool, error) -} - -func getDisableRequestCompression(ctx context.Context, configs configs) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(disableRequestCompressionProvider); ok { - value, found, err = p.getDisableRequestCompression(ctx) - if err != nil || found { - break - } - } - } - return -} - -// requestMinCompressSizeBytesProvider provides access to the MinCompressSizeBytes -type requestMinCompressSizeBytesProvider interface { - getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) -} - -func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value int64, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(requestMinCompressSizeBytesProvider); ok { - value, found, err = p.getRequestMinCompressSizeBytes(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ec2IMDSRegionProvider provides access to the ec2 imds region -// configuration value -type ec2IMDSRegionProvider interface { - getEC2IMDSRegion(ctx context.Context) (string, bool, error) -} - -// getEC2IMDSRegion searches the configs for a ec2IMDSRegionProvider and -// returns the value if found. Returns an error if a provider fails before -// a value is found. -func getEC2IMDSRegion(ctx context.Context, configs configs) (region string, found bool, err error) { - for _, cfg := range configs { - if provider, ok := cfg.(ec2IMDSRegionProvider); ok { - region, found, err = provider.getEC2IMDSRegion(ctx) - if err != nil || found { - break - } - } - } - return -} - -// credentialsProviderProvider provides access to the credentials external -// configuration value. 
-type credentialsProviderProvider interface { - getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) -} - -// getCredentialsProvider searches the configs for a credentialsProviderProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getCredentialsProvider(ctx context.Context, configs configs) (p aws.CredentialsProvider, found bool, err error) { - for _, cfg := range configs { - if provider, ok := cfg.(credentialsProviderProvider); ok { - p, found, err = provider.getCredentialsProvider(ctx) - if err != nil || found { - break - } - } - } - return -} - -// credentialsCacheOptionsProvider is an interface for retrieving a function for setting -// the aws.CredentialsCacheOptions. -type credentialsCacheOptionsProvider interface { - getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) -} - -// getCredentialsCacheOptionsProvider is an interface for retrieving a function for setting -// the aws.CredentialsCacheOptions. -func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) ( - f func(*aws.CredentialsCacheOptions), found bool, err error, -) { - for _, config := range configs { - if p, ok := config.(credentialsCacheOptionsProvider); ok { - f, found, err = p.getCredentialsCacheOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// bearerAuthTokenProviderProvider provides access to the bearer authentication -// token external configuration value. -type bearerAuthTokenProviderProvider interface { - getBearerAuthTokenProvider(context.Context) (smithybearer.TokenProvider, bool, error) -} - -// getBearerAuthTokenProvider searches the config sources for a -// bearerAuthTokenProviderProvider and returns the value if found. Returns an -// error if a provider fails before a value is found. -func getBearerAuthTokenProvider(ctx context.Context, configs configs) (p smithybearer.TokenProvider, found bool, err error) { - for _, cfg := range configs { - if provider, ok := cfg.(bearerAuthTokenProviderProvider); ok { - p, found, err = provider.getBearerAuthTokenProvider(ctx) - if err != nil || found { - break - } - } - } - return -} - -// bearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for -// setting the smithy-go auth/bearer#TokenCacheOptions. -type bearerAuthTokenCacheOptionsProvider interface { - getBearerAuthTokenCacheOptions(context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) -} - -// getBearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for -// setting the smithy-go auth/bearer#TokenCacheOptions. -func getBearerAuthTokenCacheOptions(ctx context.Context, configs configs) ( - f func(*smithybearer.TokenCacheOptions), found bool, err error, -) { - for _, config := range configs { - if p, ok := config.(bearerAuthTokenCacheOptionsProvider); ok { - f, found, err = p.getBearerAuthTokenCacheOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ssoTokenProviderOptionsProvider is an interface for retrieving a function for -// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions. -type ssoTokenProviderOptionsProvider interface { - getSSOTokenProviderOptions(context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) -} - -// getSSOTokenProviderOptions is an interface for retrieving a function for -// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions. 
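The credential provider resolved through these interfaces is never used raw; resolve_credentials.go (later in this diff) wraps it in an aws.CredentialsCache, with the cache options hook above feeding the wrap. Doing the wrapping by hand shows the effect; a sketch with obviously fake key material:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/credentials"
    )

    func main() {
        static := credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")
        // The cache memoizes Retrieve and is safe for concurrent use, which is
        // exactly why wrapWithCredentialsCache applies it unconditionally.
        cached := aws.NewCredentialsCache(static)

        creds, err := cached.Retrieve(context.TODO())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(creds.AccessKeyID)
    }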
-func getSSOTokenProviderOptions(ctx context.Context, configs configs) ( - f func(*ssocreds.SSOTokenProviderOptions), found bool, err error, -) { - for _, config := range configs { - if p, ok := config.(ssoTokenProviderOptionsProvider); ok { - f, found, err = p.getSSOTokenProviderOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ssoTokenProviderOptionsProvider - -// processCredentialOptions is an interface for retrieving a function for setting -// the processcreds.Options. -type processCredentialOptions interface { - getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) -} - -// getProcessCredentialOptions searches the slice of configs and returns the first function found -func getProcessCredentialOptions(ctx context.Context, configs configs) (f func(*processcreds.Options), found bool, err error) { - for _, config := range configs { - if p, ok := config.(processCredentialOptions); ok { - f, found, err = p.getProcessCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ec2RoleCredentialOptionsProvider is an interface for retrieving a function -// for setting the ec2rolecreds.Provider options. -type ec2RoleCredentialOptionsProvider interface { - getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) -} - -// getEC2RoleCredentialProviderOptions searches the slice of configs and returns the first function found -func getEC2RoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*ec2rolecreds.Options), found bool, err error) { - for _, config := range configs { - if p, ok := config.(ec2RoleCredentialOptionsProvider); ok { - f, found, err = p.getEC2RoleCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// defaultRegionProvider is an interface for retrieving a default region if a region was not resolved from other sources -type defaultRegionProvider interface { - getDefaultRegion(ctx context.Context) (string, bool, error) -} - -// getDefaultRegion searches the slice of configs and returns the first fallback region found -func getDefaultRegion(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, config := range configs { - if p, ok := config.(defaultRegionProvider); ok { - value, found, err = p.getDefaultRegion(ctx) - if err != nil || found { - break - } - } - } - return -} - -// endpointCredentialOptionsProvider is an interface for retrieving a function for setting -// the endpointcreds.ProviderOptions. -type endpointCredentialOptionsProvider interface { - getEndpointCredentialOptions(ctx context.Context) (func(*endpointcreds.Options), bool, error) -} - -// getEndpointCredentialProviderOptions searches the slice of configs and returns the first function found -func getEndpointCredentialProviderOptions(ctx context.Context, configs configs) (f func(*endpointcreds.Options), found bool, err error) { - for _, config := range configs { - if p, ok := config.(endpointCredentialOptionsProvider); ok { - f, found, err = p.getEndpointCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// webIdentityRoleCredentialOptionsProvider is an interface for retrieving a function for setting -// the stscreds.WebIdentityRoleProvider. 
-type webIdentityRoleCredentialOptionsProvider interface { - getWebIdentityRoleCredentialOptions(ctx context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) -} - -// getWebIdentityCredentialProviderOptions searches the slice of configs and returns the first function found -func getWebIdentityCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.WebIdentityRoleOptions), found bool, err error) { - for _, config := range configs { - if p, ok := config.(webIdentityRoleCredentialOptionsProvider); ok { - f, found, err = p.getWebIdentityRoleCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// assumeRoleCredentialOptionsProvider is an interface for retrieving a function for setting -// the stscreds.AssumeRoleOptions. -type assumeRoleCredentialOptionsProvider interface { - getAssumeRoleCredentialOptions(ctx context.Context) (func(*stscreds.AssumeRoleOptions), bool, error) -} - -// getAssumeRoleCredentialProviderOptions searches the slice of configs and returns the first function found -func getAssumeRoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.AssumeRoleOptions), found bool, err error) { - for _, config := range configs { - if p, ok := config.(assumeRoleCredentialOptionsProvider); ok { - f, found, err = p.getAssumeRoleCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// HTTPClient is an HTTP client implementation -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// httpClientProvider is an interface for retrieving HTTPClient -type httpClientProvider interface { - getHTTPClient(ctx context.Context) (HTTPClient, bool, error) -} - -// getHTTPClient searches the slice of configs and returns the HTTPClient set on configs -func getHTTPClient(ctx context.Context, configs configs) (client HTTPClient, found bool, err error) { - for _, config := range configs { - if p, ok := config.(httpClientProvider); ok { - client, found, err = p.getHTTPClient(ctx) - if err != nil || found { - break - } - } - } - return -} - -// apiOptionsProvider is an interface for retrieving APIOptions -type apiOptionsProvider interface { - getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) -} - -// getAPIOptions searches the slice of configs and returns the APIOptions set on configs -func getAPIOptions(ctx context.Context, configs configs) (apiOptions []func(*middleware.Stack) error, found bool, err error) { - for _, config := range configs { - if p, ok := config.(apiOptionsProvider); ok { - // retrieve APIOptions from configs and set it on cfg - apiOptions, found, err = p.getAPIOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// endpointResolverProvider is an interface for retrieving an aws.EndpointResolver from a configuration source -type endpointResolverProvider interface { - getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) -} - -// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used -// to configure the aws.Config.EndpointResolver value. 
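The HTTPClient interface above is deliberately minimal, a single Do method, so a stock *http.Client satisfies it. A sketch of injecting one with a hard timeout via the package's WithHTTPClient option:

    package main

    import (
        "context"
        "log"
        "net/http"
        "time"

        "github.com/aws/aws-sdk-go-v2/config"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO(),
            // *http.Client satisfies the one-method HTTPClient interface.
            config.WithHTTPClient(&http.Client{Timeout: 10 * time.Second}),
        )
        if err != nil {
            log.Fatal(err)
        }
        _ = cfg
    }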
-func getEndpointResolver(ctx context.Context, configs configs) (f aws.EndpointResolver, found bool, err error) { - for _, c := range configs { - if p, ok := c.(endpointResolverProvider); ok { - f, found, err = p.getEndpointResolver(ctx) - if err != nil || found { - break - } - } - } - return -} - -// endpointResolverWithOptionsProvider is an interface for retrieving an aws.EndpointResolverWithOptions from a configuration source -type endpointResolverWithOptionsProvider interface { - getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) -} - -// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used -// to configure the aws.Config.EndpointResolver value. -func getEndpointResolverWithOptions(ctx context.Context, configs configs) (f aws.EndpointResolverWithOptions, found bool, err error) { - for _, c := range configs { - if p, ok := c.(endpointResolverWithOptionsProvider); ok { - f, found, err = p.getEndpointResolverWithOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// loggerProvider is an interface for retrieving a logging.Logger from a configuration source. -type loggerProvider interface { - getLogger(ctx context.Context) (logging.Logger, bool, error) -} - -// getLogger searches the provided config sources for a logging.Logger that can be used -// to configure the aws.Config.Logger value. -func getLogger(ctx context.Context, configs configs) (l logging.Logger, found bool, err error) { - for _, c := range configs { - if p, ok := c.(loggerProvider); ok { - l, found, err = p.getLogger(ctx) - if err != nil || found { - break - } - } - } - return -} - -// clientLogModeProvider is an interface for retrieving the aws.ClientLogMode from a configuration source. -type clientLogModeProvider interface { - getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) -} - -func getClientLogMode(ctx context.Context, configs configs) (m aws.ClientLogMode, found bool, err error) { - for _, c := range configs { - if p, ok := c.(clientLogModeProvider); ok { - m, found, err = p.getClientLogMode(ctx) - if err != nil || found { - break - } - } - } - return -} - -// retryProvider is an configuration provider for custom Retryer. -type retryProvider interface { - getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) -} - -func getRetryer(ctx context.Context, configs configs) (v func() aws.Retryer, found bool, err error) { - for _, c := range configs { - if p, ok := c.(retryProvider); ok { - v, found, err = p.getRetryer(ctx) - if err != nil || found { - break - } - } - } - return -} - -// logConfigurationWarningsProvider is an configuration provider for -// retrieving a boolean indicating whether configuration issues should -// be logged when loading from config sources -type logConfigurationWarningsProvider interface { - getLogConfigurationWarnings(ctx context.Context) (bool, bool, error) -} - -func getLogConfigurationWarnings(ctx context.Context, configs configs) (v bool, found bool, err error) { - for _, c := range configs { - if p, ok := c.(logConfigurationWarningsProvider); ok { - v, found, err = p.getLogConfigurationWarnings(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ssoCredentialOptionsProvider is an interface for retrieving a function for setting -// the ssocreds.Options. 
-type ssoCredentialOptionsProvider interface { - getSSOProviderOptions(context.Context) (func(*ssocreds.Options), bool, error) -} - -func getSSOProviderOptions(ctx context.Context, configs configs) (v func(options *ssocreds.Options), found bool, err error) { - for _, c := range configs { - if p, ok := c.(ssoCredentialOptionsProvider); ok { - v, found, err = p.getSSOProviderOptions(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -type defaultsModeIMDSClientProvider interface { - getDefaultsModeIMDSClient(context.Context) (*imds.Client, bool, error) -} - -func getDefaultsModeIMDSClient(ctx context.Context, configs configs) (v *imds.Client, found bool, err error) { - for _, c := range configs { - if p, ok := c.(defaultsModeIMDSClientProvider); ok { - v, found, err = p.getDefaultsModeIMDSClient(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -type defaultsModeProvider interface { - getDefaultsMode(context.Context) (aws.DefaultsMode, bool, error) -} - -func getDefaultsMode(ctx context.Context, configs configs) (v aws.DefaultsMode, found bool, err error) { - for _, c := range configs { - if p, ok := c.(defaultsModeProvider); ok { - v, found, err = p.getDefaultsMode(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -type retryMaxAttemptsProvider interface { - GetRetryMaxAttempts(context.Context) (int, bool, error) -} - -func getRetryMaxAttempts(ctx context.Context, configs configs) (v int, found bool, err error) { - for _, c := range configs { - if p, ok := c.(retryMaxAttemptsProvider); ok { - v, found, err = p.GetRetryMaxAttempts(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -type retryModeProvider interface { - GetRetryMode(context.Context) (aws.RetryMode, bool, error) -} - -func getRetryMode(ctx context.Context, configs configs) (v aws.RetryMode, found bool, err error) { - for _, c := range configs { - if p, ok := c.(retryModeProvider); ok { - v, found, err = p.GetRetryMode(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go deleted file mode 100644 index fde2e3980e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ /dev/null @@ -1,367 +0,0 @@ -package config - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" - "os" - - "github.com/aws/aws-sdk-go-v2/aws" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - "github.com/aws/smithy-go/logging" -) - -// resolveDefaultAWSConfig will write default configuration values into the cfg -// value. It will write the default values, overwriting any previous value. -// -// This should be used as the first resolver in the slice of resolvers when -// resolving external configuration. -func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error { - var sources []interface{} - for _, s := range cfgs { - sources = append(sources, s) - } - - *cfg = aws.Config{ - Logger: logging.NewStandardLogger(os.Stderr), - ConfigSources: sources, - } - return nil -} - -// resolveCustomCABundle extracts the first instance of a custom CA bundle filename -// from the external configurations. It will update the HTTP Client's builder -// to be configured with the custom CA bundle. 
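resolveCustomCABundle and the retry getters above have matching With* options on the loader side (WithCustomCABundle, WithRetryMaxAttempts, and WithRetryMode are assumed from the config package's public API; they are not shown in this hunk). A sketch combining them, with a placeholder bundle path:

    package main

    import (
        "context"
        "log"
        "os"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
    )

    func main() {
        pem, err := os.Open("ca-bundle.pem") // hypothetical path
        if err != nil {
            log.Fatal(err)
        }
        defer pem.Close()

        cfg, err := config.LoadDefaultConfig(context.TODO(),
            config.WithCustomCABundle(pem),
            config.WithRetryMaxAttempts(5),
            config.WithRetryMode(aws.RetryModeAdaptive),
        )
        if err != nil {
            log.Fatal(err)
        }
        _ = cfg
    }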
-// -// Config provider used: -// * customCABundleProvider -func resolveCustomCABundle(ctx context.Context, cfg *aws.Config, cfgs configs) error { - pemCerts, found, err := getCustomCABundle(ctx, cfgs) - if err != nil { - // TODO error handling, What is the best way to handle this? - // capture previous errors continue. error out if all errors - return err - } - if !found { - return nil - } - - if cfg.HTTPClient == nil { - cfg.HTTPClient = awshttp.NewBuildableClient() - } - - trOpts, ok := cfg.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return fmt.Errorf("unable to add custom RootCAs HTTPClient, "+ - "has no WithTransportOptions, %T", cfg.HTTPClient) - } - - var appendErr error - client := trOpts.WithTransportOptions(func(tr *http.Transport) { - if tr.TLSClientConfig == nil { - tr.TLSClientConfig = &tls.Config{} - } - if tr.TLSClientConfig.RootCAs == nil { - tr.TLSClientConfig.RootCAs = x509.NewCertPool() - } - - b, err := ioutil.ReadAll(pemCerts) - if err != nil { - appendErr = fmt.Errorf("failed to read custom CA bundle PEM file") - } - - if !tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(b) { - appendErr = fmt.Errorf("failed to load custom CA bundle PEM file") - } - }) - if appendErr != nil { - return appendErr - } - - cfg.HTTPClient = client - return err -} - -// resolveRegion extracts the first instance of a Region from the configs slice. -// -// Config providers used: -// * regionProvider -func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error { - v, found, err := getRegion(ctx, configs) - if err != nil { - // TODO error handling, What is the best way to handle this? - // capture previous errors continue. error out if all errors - return err - } - if !found { - return nil - } - - cfg.Region = v - return nil -} - -func resolveBaseEndpoint(ctx context.Context, cfg *aws.Config, configs configs) error { - var downcastCfgSources []interface{} - for _, cs := range configs { - downcastCfgSources = append(downcastCfgSources, interface{}(cs)) - } - - if val, found, err := GetIgnoreConfiguredEndpoints(ctx, downcastCfgSources); found && val && err == nil { - cfg.BaseEndpoint = nil - return nil - } - - v, found, err := getBaseEndpoint(ctx, configs) - if err != nil { - return err - } - - if !found { - return nil - } - cfg.BaseEndpoint = aws.String(v) - return nil -} - -// resolveAppID extracts the sdk app ID from the configs slice's SharedConfig or env var -func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error { - ID, _, err := getAppID(ctx, configs) - if err != nil { - return err - } - - cfg.AppID = ID - return nil -} - -// resolveDisableRequestCompression extracts the DisableRequestCompression from the configs slice's -// SharedConfig or EnvConfig -func resolveDisableRequestCompression(ctx context.Context, cfg *aws.Config, configs configs) error { - disable, _, err := getDisableRequestCompression(ctx, configs) - if err != nil { - return err - } - - cfg.DisableRequestCompression = disable - return nil -} - -// resolveRequestMinCompressSizeBytes extracts the RequestMinCompressSizeBytes from the configs slice's -// SharedConfig or EnvConfig -func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, configs configs) error { - minBytes, found, err := getRequestMinCompressSizeBytes(ctx, configs) - if err != nil { - return err - } - // must set a default min size 10240 if not configured - if !found { - minBytes = 10240 - } - cfg.RequestMinCompressSizeBytes = minBytes - return nil -} - -// resolveDefaultRegion extracts the 
first instance of a default region and sets `aws.Config.Region` to the default -// region if region had not been resolved from other sources. -func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { - if len(cfg.Region) > 0 { - return nil - } - - v, found, err := getDefaultRegion(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.Region = v - - return nil -} - -// resolveHTTPClient extracts the first instance of a HTTPClient and sets `aws.Config.HTTPClient` to the HTTPClient instance -// if one has not been resolved from other sources. -func resolveHTTPClient(ctx context.Context, cfg *aws.Config, configs configs) error { - c, found, err := getHTTPClient(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.HTTPClient = c - return nil -} - -// resolveAPIOptions extracts the first instance of APIOptions and sets `aws.Config.APIOptions` to the resolved API options -// if one has not been resolved from other sources. -func resolveAPIOptions(ctx context.Context, cfg *aws.Config, configs configs) error { - o, found, err := getAPIOptions(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.APIOptions = o - - return nil -} - -// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice -// and sets the functions result on the aws.Config.EndpointResolver -func resolveEndpointResolver(ctx context.Context, cfg *aws.Config, configs configs) error { - endpointResolver, found, err := getEndpointResolver(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.EndpointResolver = endpointResolver - - return nil -} - -// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice -// and sets the functions result on the aws.Config.EndpointResolver -func resolveEndpointResolverWithOptions(ctx context.Context, cfg *aws.Config, configs configs) error { - endpointResolver, found, err := getEndpointResolverWithOptions(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.EndpointResolverWithOptions = endpointResolver - - return nil -} - -func resolveLogger(ctx context.Context, cfg *aws.Config, configs configs) error { - logger, found, err := getLogger(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.Logger = logger - - return nil -} - -func resolveClientLogMode(ctx context.Context, cfg *aws.Config, configs configs) error { - mode, found, err := getClientLogMode(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.ClientLogMode = mode - - return nil -} - -func resolveRetryer(ctx context.Context, cfg *aws.Config, configs configs) error { - retryer, found, err := getRetryer(ctx, configs) - if err != nil { - return err - } - - if found { - cfg.Retryer = retryer - return nil - } - - // Only load the retry options if a custom retryer has not be specified. 
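As that comment notes, max-attempts and retry-mode are only consulted when no custom retryer was supplied. A sketch of supplying one, assuming the config package's WithRetryer option and the helpers in aws/retry:

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/aws/retry"
        "github.com/aws/aws-sdk-go-v2/config"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO(),
            // With a retryer present, resolveRetryer returns early and the
            // max-attempts / retry-mode branches are never evaluated.
            config.WithRetryer(func() aws.Retryer {
                return retry.AddWithMaxAttempts(retry.NewStandard(), 5)
            }),
        )
        if err != nil {
            log.Fatal(err)
        }
        _ = cfg
    }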
- if err = resolveRetryMaxAttempts(ctx, cfg, configs); err != nil { - return err - } - return resolveRetryMode(ctx, cfg, configs) -} - -func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs) error { - if len(cfg.Region) > 0 { - return nil - } - - region, found, err := getEC2IMDSRegion(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.Region = region - - return nil -} - -func resolveDefaultsModeOptions(ctx context.Context, cfg *aws.Config, configs configs) error { - defaultsMode, found, err := getDefaultsMode(ctx, configs) - if err != nil { - return err - } - if !found { - defaultsMode = aws.DefaultsModeLegacy - } - - var environment aws.RuntimeEnvironment - if defaultsMode == aws.DefaultsModeAuto { - envConfig, _, _ := getAWSConfigSources(configs) - - client, found, err := getDefaultsModeIMDSClient(ctx, configs) - if err != nil { - return err - } - if !found { - client = imds.NewFromConfig(*cfg) - } - - environment, err = resolveDefaultsModeRuntimeEnvironment(ctx, envConfig, client) - if err != nil { - return err - } - } - - cfg.DefaultsMode = defaultsMode - cfg.RuntimeEnvironment = environment - - return nil -} - -func resolveRetryMaxAttempts(ctx context.Context, cfg *aws.Config, configs configs) error { - maxAttempts, found, err := getRetryMaxAttempts(ctx, configs) - if err != nil || !found { - return err - } - cfg.RetryMaxAttempts = maxAttempts - - return nil -} - -func resolveRetryMode(ctx context.Context, cfg *aws.Config, configs configs) error { - retryMode, found, err := getRetryMode(ctx, configs) - if err != nil || !found { - return err - } - cfg.RetryMode = retryMode - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go deleted file mode 100644 index a8ebb3c0a3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go +++ /dev/null @@ -1,122 +0,0 @@ -package config - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" - "github.com/aws/aws-sdk-go-v2/service/ssooidc" - smithybearer "github.com/aws/smithy-go/auth/bearer" -) - -// resolveBearerAuthToken extracts a token provider from the config sources. -// -// If an explicit bearer authentication token provider is not found the -// resolver will fallback to resolving token provider via other config sources -// such as SharedConfig. -func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error { - found, err := resolveBearerAuthTokenProvider(ctx, cfg, configs) - if found || err != nil { - return err - } - - return resolveBearerAuthTokenProviderChain(ctx, cfg, configs) -} - -// resolveBearerAuthTokenProvider extracts the first instance of -// BearerAuthTokenProvider from the config sources. -// -// The resolved BearerAuthTokenProvider will be wrapped in a cache to ensure -// the Token is only refreshed when needed. This also protects the -// TokenProvider so it can be used concurrently. 
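wrapWithBearerAuthTokenCache (below) applies the same cache-unless-already-cached rule as the credentials path. A hand-rolled sketch of the wrapping, assuming smithy-go's StaticTokenProvider; the option field mirrors the default set in the deleted code:

    package main

    import (
        "context"
        "fmt"
        "log"
        "time"

        smithybearer "github.com/aws/smithy-go/auth/bearer"
    )

    func main() {
        static := smithybearer.StaticTokenProvider{
            Token: smithybearer.Token{Value: "example-token"}, // placeholder
        }
        // The cache refreshes ahead of expiry and guards concurrent use.
        cached := smithybearer.NewTokenCache(static, func(o *smithybearer.TokenCacheOptions) {
            o.RefreshBeforeExpires = 5 * time.Minute
        })

        tok, err := cached.RetrieveBearerToken(context.TODO())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(tok.Value)
    }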
-// -// Config providers used: -// * bearerAuthTokenProviderProvider -func resolveBearerAuthTokenProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { - tokenProvider, found, err := getBearerAuthTokenProvider(ctx, configs) - if !found || err != nil { - return false, err - } - - cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( - ctx, configs, tokenProvider) - if err != nil { - return false, err - } - - return true, nil -} - -func resolveBearerAuthTokenProviderChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) { - _, sharedConfig, _ := getAWSConfigSources(configs) - - var provider smithybearer.TokenProvider - - if sharedConfig.SSOSession != nil { - provider, err = resolveBearerAuthSSOTokenProvider( - ctx, cfg, sharedConfig.SSOSession, configs) - } - - if err == nil && provider != nil { - cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( - ctx, configs, provider) - } - - return err -} - -func resolveBearerAuthSSOTokenProvider(ctx context.Context, cfg *aws.Config, session *SSOSession, configs configs) (*ssocreds.SSOTokenProvider, error) { - ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) - if err != nil { - return nil, fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err) - } - - var optFns []func(*ssocreds.SSOTokenProviderOptions) - if found { - optFns = append(optFns, ssoTokenProviderOptionsFn) - } - - cachePath, err := ssocreds.StandardCachedTokenFilepath(session.Name) - if err != nil { - return nil, fmt.Errorf("failed to get SSOTokenProvider's cache path, %w", err) - } - - client := ssooidc.NewFromConfig(*cfg) - provider := ssocreds.NewSSOTokenProvider(client, cachePath, optFns...) - - return provider, nil -} - -// wrapWithBearerAuthTokenCache will wrap provider with an smithy-go -// bearer/auth#TokenCache with the provided options if the provider is not -// already a TokenCache. -func wrapWithBearerAuthTokenCache( - ctx context.Context, - cfgs configs, - provider smithybearer.TokenProvider, - optFns ...func(*smithybearer.TokenCacheOptions), -) (smithybearer.TokenProvider, error) { - _, ok := provider.(*smithybearer.TokenCache) - if ok { - return provider, nil - } - - tokenCacheConfigOptions, optionsFound, err := getBearerAuthTokenCacheOptions(ctx, cfgs) - if err != nil { - return nil, err - } - - opts := make([]func(*smithybearer.TokenCacheOptions), 0, 2+len(optFns)) - opts = append(opts, func(o *smithybearer.TokenCacheOptions) { - o.RefreshBeforeExpires = 5 * time.Minute - o.RetrieveBearerTokenTimeout = 30 * time.Second - }) - opts = append(opts, optFns...) 
- if optionsFound { - opts = append(opts, tokenCacheConfigOptions) - } - - return smithybearer.NewTokenCache(provider, opts...), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go deleted file mode 100644 index 89368520f3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go +++ /dev/null @@ -1,566 +0,0 @@ -package config - -import ( - "context" - "fmt" - "io/ioutil" - "net" - "net/url" - "os" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" - "github.com/aws/aws-sdk-go-v2/credentials/processcreds" - "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - "github.com/aws/aws-sdk-go-v2/service/sso" - "github.com/aws/aws-sdk-go-v2/service/ssooidc" - "github.com/aws/aws-sdk-go-v2/service/sts" -) - -const ( - // valid credential source values - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" - httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" -) - -// direct representation of the IPv4 address for the ECS container -// "169.254.170.2" -var ecsContainerIPv4 net.IP = []byte{ - 169, 254, 170, 2, -} - -// direct representation of the IPv4 address for the EKS container -// "169.254.170.23" -var eksContainerIPv4 net.IP = []byte{ - 169, 254, 170, 23, -} - -// direct representation of the IPv6 address for the EKS container -// "fd00:ec2::23" -var eksContainerIPv6 net.IP = []byte{ - 0xFD, 0, 0xE, 0xC2, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0x23, -} - -var ( - ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing -) - -// resolveCredentials extracts a credential provider from slice of config -// sources. -// -// If an explicit credential provider is not found the resolver will fallback -// to resolving credentials by extracting a credential provider from EnvConfig -// and SharedConfig. -func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error { - found, err := resolveCredentialProvider(ctx, cfg, configs) - if found || err != nil { - return err - } - - return resolveCredentialChain(ctx, cfg, configs) -} - -// resolveCredentialProvider extracts the first instance of Credentials from the -// config slices. -// -// The resolved CredentialProvider will be wrapped in a cache to ensure the -// credentials are only refreshed when needed. This also protects the -// credential provider to be used concurrently. -// -// Config providers used: -// * credentialsProviderProvider -func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { - credProvider, found, err := getCredentialsProvider(ctx, configs) - if !found || err != nil { - return false, err - } - - cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider) - if err != nil { - return false, err - } - - return true, nil -} - -// resolveCredentialChain resolves a credential provider chain using EnvConfig -// and SharedConfig if present in the slice of provided configs. 
-// -// The resolved CredentialProvider will be wrapped in a cache to ensure the -// credentials are only refreshed when needed. This also protects the -// credential provider to be used concurrently. -func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) { - envConfig, sharedConfig, other := getAWSConfigSources(configs) - - // When checking if a profile was specified programmatically we should only consider the "other" - // configuration sources that have been provided. This ensures we correctly honor the expected credential - // hierarchy. - _, sharedProfileSet, err := getSharedConfigProfile(ctx, other) - if err != nil { - return err - } - - switch { - case sharedProfileSet: - err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) - case envConfig.Credentials.HasKeys(): - cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials} - case len(envConfig.WebIdentityTokenFilePath) > 0: - err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs) - default: - err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) - } - if err != nil { - return err - } - - // Wrap the resolved provider in a cache so the SDK will cache credentials. - cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, cfg.Credentials) - if err != nil { - return err - } - - return nil -} - -func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (err error) { - - switch { - case sharedConfig.Source != nil: - // Assume IAM role with credentials source from a different profile. - err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs) - - case sharedConfig.Credentials.HasKeys(): - // Static Credentials from Shared Config/Credentials file. - cfg.Credentials = credentials.StaticCredentialsProvider{ - Value: sharedConfig.Credentials, - } - - case len(sharedConfig.CredentialSource) != 0: - err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs) - - case len(sharedConfig.WebIdentityTokenFile) != 0: - // Credentials from Assume Web Identity token require an IAM Role, and - // that roll will be assumed. May be wrapped with another assume role - // via SourceProfile. 
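The switch above gives a programmatically chosen profile priority over environment credentials, which is why only the "other" sources are consulted for the profile name. Selecting one from code, with a stand-in profile name:

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/config"
    )

    func main() {
        // Takes the sharedProfileSet branch above even when static
        // credentials are also present in the environment.
        cfg, err := config.LoadDefaultConfig(context.TODO(),
            config.WithSharedConfigProfile("dev"), // hypothetical profile
        )
        if err != nil {
            log.Fatal(err)
        }
        _ = cfg
    }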
- return assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs) - - case sharedConfig.hasSSOConfiguration(): - err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs) - - case len(sharedConfig.CredentialProcess) != 0: - // Get credentials from CredentialProcess - err = processCredentials(ctx, cfg, sharedConfig, configs) - - case len(envConfig.ContainerCredentialsEndpoint) != 0: - err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) - - case len(envConfig.ContainerCredentialsRelativePath) != 0: - err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) - - default: - err = resolveEC2RoleCredentials(ctx, cfg, configs) - } - if err != nil { - return err - } - - if len(sharedConfig.RoleARN) > 0 { - return credsFromAssumeRole(ctx, cfg, sharedConfig, configs) - } - - return nil -} - -func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error { - if err := sharedConfig.validateSSOConfiguration(); err != nil { - return err - } - - var options []func(*ssocreds.Options) - v, found, err := getSSOProviderOptions(ctx, configs) - if err != nil { - return err - } - if found { - options = append(options, v) - } - - cfgCopy := cfg.Copy() - - if sharedConfig.SSOSession != nil { - ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) - if err != nil { - return fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err) - } - var optFns []func(*ssocreds.SSOTokenProviderOptions) - if found { - optFns = append(optFns, ssoTokenProviderOptionsFn) - } - cfgCopy.Region = sharedConfig.SSOSession.SSORegion - cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedConfig.SSOSession.Name) - if err != nil { - return err - } - oidcClient := ssooidc.NewFromConfig(cfgCopy) - tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath, optFns...) - options = append(options, func(o *ssocreds.Options) { - o.SSOTokenProvider = tokenProvider - o.CachedTokenFilepath = cachedPath - }) - } else { - cfgCopy.Region = sharedConfig.SSORegion - } - - cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...) - - return nil -} - -func ecsContainerURI(path string) string { - return fmt.Sprintf("%s%s", ecsContainerEndpoint, path) -} - -func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error { - var opts []func(*processcreds.Options) - - options, found, err := getProcessCredentialOptions(ctx, configs) - if err != nil { - return err - } - if found { - opts = append(opts, options) - } - - cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...) 
- - return nil -} - -// isAllowedHost allows host to be loopback or known ECS/EKS container IPs -// -// host can either be an IP address OR an unresolved hostname - resolution will -// be automatically performed in the latter case -func isAllowedHost(host string) (bool, error) { - if ip := net.ParseIP(host); ip != nil { - return isIPAllowed(ip), nil - } - - addrs, err := lookupHostFn(host) - if err != nil { - return false, err - } - - for _, addr := range addrs { - if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) { - return false, nil - } - } - - return true, nil -} - -func isIPAllowed(ip net.IP) bool { - return ip.IsLoopback() || - ip.Equal(ecsContainerIPv4) || - ip.Equal(eksContainerIPv4) || - ip.Equal(eksContainerIPv6) -} - -func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error { - var resolveErr error - - parsed, err := url.Parse(endpointURL) - if err != nil { - resolveErr = fmt.Errorf("invalid URL, %w", err) - } else { - host := parsed.Hostname() - if len(host) == 0 { - resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL") - } else if parsed.Scheme == "http" { - if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil { - resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, allowHostErr) - } else if !isAllowedHost { - resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed", host) - } - } - } - - if resolveErr != nil { - return resolveErr - } - - return resolveHTTPCredProvider(ctx, cfg, endpointURL, authToken, configs) -} - -func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToken string, configs configs) error { - optFns := []func(*endpointcreds.Options){ - func(options *endpointcreds.Options) { - if len(authToken) != 0 { - options.AuthorizationToken = authToken - } - if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" { - options.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) { - var contents []byte - var err error - if contents, err = ioutil.ReadFile(authFilePath); err != nil { - return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err) - } - return string(contents), nil - }) - } - options.APIOptions = cfg.APIOptions - if cfg.Retryer != nil { - options.Retryer = cfg.Retryer() - } - }, - } - - optFn, found, err := getEndpointCredentialProviderOptions(ctx, configs) - if err != nil { - return err - } - if found { - optFns = append(optFns, optFn) - } - - provider := endpointcreds.New(url, optFns...) 
- - cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider, func(options *aws.CredentialsCacheOptions) { - options.ExpiryWindow = 5 * time.Minute - }) - if err != nil { - return err - } - - return nil -} - -func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (err error) { - switch sharedCfg.CredentialSource { - case credSourceEc2Metadata: - return resolveEC2RoleCredentials(ctx, cfg, configs) - - case credSourceEnvironment: - cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials} - - case credSourceECSContainer: - if len(envConfig.ContainerCredentialsRelativePath) == 0 { - return fmt.Errorf("EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set") - } - return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) - - default: - return fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment") - } - - return nil -} - -func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error { - optFns := make([]func(*ec2rolecreds.Options), 0, 2) - - optFn, found, err := getEC2RoleCredentialProviderOptions(ctx, configs) - if err != nil { - return err - } - if found { - optFns = append(optFns, optFn) - } - - optFns = append(optFns, func(o *ec2rolecreds.Options) { - // Only define a client from config if not already defined. - if o.Client == nil { - o.Client = imds.NewFromConfig(*cfg) - } - }) - - provider := ec2rolecreds.New(optFns...) - - cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider) - if err != nil { - return err - } - - return nil -} - -func getAWSConfigSources(cfgs configs) (*EnvConfig, *SharedConfig, configs) { - var ( - envConfig *EnvConfig - sharedConfig *SharedConfig - other configs - ) - - for i := range cfgs { - switch c := cfgs[i].(type) { - case EnvConfig: - if envConfig == nil { - envConfig = &c - } - case *EnvConfig: - if envConfig == nil { - envConfig = c - } - case SharedConfig: - if sharedConfig == nil { - sharedConfig = &c - } - case *SharedConfig: - if envConfig == nil { - sharedConfig = c - } - default: - other = append(other, c) - } - } - - if envConfig == nil { - envConfig = &EnvConfig{} - } - - if sharedConfig == nil { - sharedConfig = &SharedConfig{} - } - - return envConfig, sharedConfig, other -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a -// session when the MFAToken option is not set when shared config is configured -// load assume a role with an MFA token. 
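That error type fires when a profile carries mfa_serial but no token source was wired in. A sketch of the usual fix, assuming the config package's WithAssumeRoleCredentialOptions helper (not shown in this hunk) and stscreds.StdinTokenProvider:

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO(),
            // Satisfies the TokenProvider check in credsFromAssumeRole below,
            // prompting on stdin for the MFA code when the role is assumed.
            config.WithAssumeRoleCredentialOptions(func(o *stscreds.AssumeRoleOptions) {
                o.TokenProvider = stscreds.StdinTokenProvider
            }),
        )
        if err != nil {
            log.Fatal(err)
        }
        _ = cfg
    }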
-type AssumeRoleTokenProviderNotSetError struct{} - -// Error is the error message -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, roleARN, sessionName string, configs configs) error { - if len(filepath) == 0 { - return fmt.Errorf("token file path is not set") - } - - optFns := []func(*stscreds.WebIdentityRoleOptions){ - func(options *stscreds.WebIdentityRoleOptions) { - options.RoleSessionName = sessionName - }, - } - - optFn, found, err := getWebIdentityCredentialProviderOptions(ctx, configs) - if err != nil { - return err - } - - if found { - optFns = append(optFns, optFn) - } - - opts := stscreds.WebIdentityRoleOptions{ - RoleARN: roleARN, - } - - for _, fn := range optFns { - fn(&opts) - } - - if len(opts.RoleARN) == 0 { - return fmt.Errorf("role ARN is not set") - } - - client := opts.Client - if client == nil { - client = sts.NewFromConfig(*cfg) - } - - provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...) - - cfg.Credentials = provider - - return nil -} - -func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) { - optFns := []func(*stscreds.AssumeRoleOptions){ - func(options *stscreds.AssumeRoleOptions) { - options.RoleSessionName = sharedCfg.RoleSessionName - if sharedCfg.RoleDurationSeconds != nil { - if *sharedCfg.RoleDurationSeconds/time.Minute > 15 { - options.Duration = *sharedCfg.RoleDurationSeconds - } - } - // Assume role with external ID - if len(sharedCfg.ExternalID) > 0 { - options.ExternalID = aws.String(sharedCfg.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.MFASerial) != 0 { - options.SerialNumber = aws.String(sharedCfg.MFASerial) - } - }, - } - - optFn, found, err := getAssumeRoleCredentialProviderOptions(ctx, configs) - if err != nil { - return err - } - if found { - optFns = append(optFns, optFn) - } - - { - // Synthesize options early to validate configuration errors sooner to ensure a token provider - // is present if the SerialNumber was set. - var o stscreds.AssumeRoleOptions - for _, fn := range optFns { - fn(&o) - } - if o.TokenProvider == nil && o.SerialNumber != nil { - return AssumeRoleTokenProviderNotSetError{} - } - } - - cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...) - - return nil -} - -// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache -// with the provided options if the provider is not already a -// aws.CredentialsCache. -func wrapWithCredentialsCache( - ctx context.Context, - cfgs configs, - provider aws.CredentialsProvider, - optFns ...func(options *aws.CredentialsCacheOptions), -) (aws.CredentialsProvider, error) { - _, ok := provider.(*aws.CredentialsCache) - if ok { - return provider, nil - } - - credCacheOptions, optionsFound, err := getCredentialsCacheOptionsProvider(ctx, cfgs) - if err != nil { - return nil, err - } - - // force allocation of a new slice if the additional options are - // needed, to prevent overwriting the passed in slice of options. 
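The full slice expression that comment describes, optFns[:len(optFns):len(optFns)], clips capacity to length so the subsequent append cannot scribble over the caller's backing array. A self-contained sketch of the difference:

    package main

    import "fmt"

    func main() {
        base := make([]int, 2, 4) // spare capacity of 2

        alias := base[:2]    // shares base's spare capacity
        capped := base[:2:2] // full slice expression: cap == len

        _ = append(alias, 99)  // reuses base's backing array
        _ = append(capped, 77) // cap exhausted: forced to allocate fresh storage

        // The uncapped append leaked into base's storage; the capped one did not.
        fmt.Println(base[:cap(base)][2]) // 99
    }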
- optFns = optFns[:len(optFns):len(optFns)] - if optionsFound { - optFns = append(optFns, credCacheOptions) - } - - return aws.NewCredentialsCache(provider, optFns...), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go deleted file mode 100644 index c546cb7d0f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ /dev/null @@ -1,1584 +0,0 @@ -package config - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - "github.com/aws/aws-sdk-go-v2/internal/ini" - "github.com/aws/aws-sdk-go-v2/internal/shareddefaults" - "github.com/aws/smithy-go/logging" - smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression" -) - -const ( - // Prefix to use for filtering profiles. The profile prefix should only - // exist in the shared config file, not the credentials file. - profilePrefix = `profile ` - - // Prefix to be used for SSO sections. These are supposed to only exist in - // the shared config file, not the credentials file. - ssoSectionPrefix = `sso-session ` - - // Prefix for services section. It is referenced in profile via the services - // parameter to configure clients for service-specific parameters. - servicesPrefix = `services ` - - // string equivalent for boolean - endpointDiscoveryDisabled = `false` - endpointDiscoveryEnabled = `true` - endpointDiscoveryAuto = `auto` - - // Static Credentials group - accessKeyIDKey = `aws_access_key_id` // group required - secretAccessKey = `aws_secret_access_key` // group required - sessionTokenKey = `aws_session_token` // optional - - // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required - credentialSourceKey = `credential_source` // group required (or source_profile) - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional - roleDurationSecondsKey = "duration_seconds" // optional - - // AWS Single Sign-On (AWS SSO) group - ssoSessionNameKey = "sso_session" - - ssoRegionKey = "sso_region" - ssoStartURLKey = "sso_start_url" - - ssoAccountIDKey = "sso_account_id" - ssoRoleNameKey = "sso_role_name" - - // Additional Config fields - regionKey = `region` - - // endpoint discovery group - enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional - - // External Credential process - credentialProcessKey = `credential_process` // optional - - // Web Identity Token File - webIdentityTokenFileKey = `web_identity_token_file` // optional - - // S3 ARN Region Usage - s3UseARNRegionKey = "s3_use_arn_region" - - ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode" - - ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" - - ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled" - - // Use DualStack Endpoint Resolution - useDualStackEndpoint = "use_dualstack_endpoint" - - // DefaultSharedConfigProfile is the default profile to be used when - // loading configuration from the config files if another profile name - // is not provided. 
- DefaultSharedConfigProfile = `default` - - // S3 Disable Multi-Region AccessPoints - s3DisableMultiRegionAccessPointsKey = `s3_disable_multiregion_access_points` - - useFIPSEndpointKey = "use_fips_endpoint" - - defaultsModeKey = "defaults_mode" - - // Retry options - retryMaxAttemptsKey = "max_attempts" - retryModeKey = "retry_mode" - - caBundleKey = "ca_bundle" - - sdkAppID = "sdk_ua_app_id" - - ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls" - - endpointURL = "endpoint_url" - - servicesSectionKey = "services" - - disableRequestCompression = "disable_request_compression" - requestMinCompressionSizeBytes = "request_min_compression_size_bytes" - - s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth" -) - -// defaultSharedConfigProfile allows for swapping the default profile for testing -var defaultSharedConfigProfile = DefaultSharedConfigProfile - -// DefaultSharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func DefaultSharedCredentialsFilename() string { - return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "credentials") -} - -// DefaultSharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func DefaultSharedConfigFilename() string { - return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "config") -} - -// DefaultSharedConfigFiles is a slice of the default shared config files that -// the will be used in order to load the SharedConfig. -var DefaultSharedConfigFiles = []string{ - DefaultSharedConfigFilename(), -} - -// DefaultSharedCredentialsFiles is a slice of the default shared credentials -// files that the will be used in order to load the SharedConfig. -var DefaultSharedCredentialsFiles = []string{ - DefaultSharedCredentialsFilename(), -} - -// SSOSession provides the shared configuration parameters of the sso-session -// section. -type SSOSession struct { - Name string - SSORegion string - SSOStartURL string -} - -func (s *SSOSession) setFromIniSection(section ini.Section) { - updateString(&s.Name, section, ssoSessionNameKey) - updateString(&s.SSORegion, section, ssoRegionKey) - updateString(&s.SSOStartURL, section, ssoStartURLKey) -} - -// Services contains values configured in the services section -// of the AWS configuration file. -type Services struct { - // Services section values - // {"serviceId": {"key": "value"}} - // e.g. {"s3": {"endpoint_url": "example.com"}} - ServiceValues map[string]map[string]string -} - -func (s *Services) setFromIniSection(section ini.Section) { - if s.ServiceValues == nil { - s.ServiceValues = make(map[string]map[string]string) - } - for _, service := range section.List() { - s.ServiceValues[service] = section.Map(service) - } -} - -// SharedConfig represents the configuration fields of the SDK config files. -type SharedConfig struct { - Profile string - - // Credentials values from the config file. Both aws_access_key_id - // and aws_secret_access_key must be provided together in the same file - // to be considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of the - // other two fields are also provided. 
- // - // aws_access_key_id - // aws_secret_access_key - // aws_session_token - Credentials aws.Credentials - - CredentialSource string - CredentialProcess string - WebIdentityTokenFile string - - // SSO session options - SSOSessionName string - SSOSession *SSOSession - - // Legacy SSO session options - SSORegion string - SSOStartURL string - - // SSO fields not used - SSOAccountID string - SSORoleName string - - RoleARN string - ExternalID string - MFASerial string - RoleSessionName string - RoleDurationSeconds *time.Duration - - SourceProfileName string - Source *SharedConfig - - // Region is the region the SDK should use for looking up AWS service endpoints - // and signing requests. - // - // region = us-west-2 - Region string - - // EnableEndpointDiscovery can be enabled or disabled in the shared config - // by setting endpoint_discovery_enabled to true, or false respectively. - // - // endpoint_discovery_enabled = true - EnableEndpointDiscovery aws.EndpointDiscoveryEnableState - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. - // - // s3_use_arn_region=true - S3UseARNRegion *bool - - // Specifies the EC2 Instance Metadata Service default endpoint selection - // mode (IPv4 or IPv6) - // - // ec2_metadata_service_endpoint_mode=IPv6 - EC2IMDSEndpointMode imds.EndpointModeState - - // Specifies the EC2 Instance Metadata Service endpoint to use. If - // specified it overrides EC2IMDSEndpointMode. - // - // ec2_metadata_service_endpoint=http://fd00:ec2::254 - EC2IMDSEndpoint string - - // Specifies that IMDS clients should not fallback to IMDSv1 if token - // requests fail. - // - // ec2_metadata_v1_disabled=true - EC2IMDSv1Disabled *bool - - // Specifies if the S3 service should disable support for Multi-Region - // access-points - // - // s3_disable_multiregion_access_points=true - S3DisableMultiRegionAccessPoints *bool - - // Specifies that SDK clients must resolve a dual-stack endpoint for - // services. - // - // use_dualstack_endpoint=true - UseDualStackEndpoint aws.DualStackEndpointState - - // Specifies that SDK clients must resolve a FIPS endpoint for - // services. - // - // use_fips_endpoint=true - UseFIPSEndpoint aws.FIPSEndpointState - - // Specifies which defaults mode should be used by services. - // - // defaults_mode=standard - DefaultsMode aws.DefaultsMode - - // Specifies the maximum number attempts an API client will call an - // operation that fails with a retryable error. - // - // max_attempts=3 - RetryMaxAttempts int - - // Specifies the retry model the API client will be created with. - // - // retry_mode=standard - RetryMode aws.RetryMode - - // Sets the path to a custom Credentials Authority (CA) Bundle PEM file - // that the SDK will use instead of the system's root CA bundle. Only use - // this if you want to configure the SDK to use a custom set of CAs. - // - // Enabling this option will attempt to merge the Transport into the SDK's - // HTTP client. If the client's Transport is not a http.Transport an error - // will be returned. If the Transport's TLS config is set this option will - // cause the SDK to overwrite the Transport's TLS config's RootCAs value. - // - // Setting a custom HTTPClient in the aws.Config options will override this - // setting. To use this option and custom HTTP client, the HTTP client - // needs to be provided when creating the config. Not the service client. 
- // - // ca_bundle=$HOME/my_custom_ca_bundle - CustomCABundle string - - // aws sdk app ID that can be added to user agent header string - AppID string - - // Flag used to disable configured endpoints. - IgnoreConfiguredEndpoints *bool - - // Value to contain configured endpoints to be propagated to - // corresponding endpoint resolution field. - BaseEndpoint string - - // Services section config. - ServicesSectionName string - Services Services - - // determine if request compression is allowed, default to false - // retrieved from config file's profile field disable_request_compression - DisableRequestCompression *bool - - // inclusive threshold request body size to trigger compression, - // default to 10240 and must be within 0 and 10485760 bytes inclusive - // retrieved from config file's profile field request_min_compression_size_bytes - RequestMinCompressSizeBytes *int64 - - // Whether S3Express auth is disabled. - // - // This will NOT prevent requests from being made to S3Express buckets, it - // will only bypass the modified endpoint routing and signing behaviors - // associated with the feature. - S3DisableExpressAuth *bool -} - -func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { - if len(c.DefaultsMode) == 0 { - return "", false, nil - } - - return c.DefaultsMode, true, nil -} - -// GetRetryMaxAttempts returns the maximum number of attempts an API client -// created Retryer should attempt an operation call before failing. -func (c SharedConfig) GetRetryMaxAttempts(ctx context.Context) (value int, ok bool, err error) { - if c.RetryMaxAttempts == 0 { - return 0, false, nil - } - - return c.RetryMaxAttempts, true, nil -} - -// GetRetryMode returns the model the API client should create its Retryer in. -func (c SharedConfig) GetRetryMode(ctx context.Context) (value aws.RetryMode, ok bool, err error) { - if len(c.RetryMode) == 0 { - return "", false, nil - } - - return c.RetryMode, true, nil -} - -// GetS3UseARNRegion returns if the S3 service should allow ARNs to direct the region -// the client's requests are sent to. -func (c SharedConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) { - if c.S3UseARNRegion == nil { - return false, false, nil - } - - return *c.S3UseARNRegion, true, nil -} - -// GetEnableEndpointDiscovery returns if the enable_endpoint_discovery is set. -func (c SharedConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) { - if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { - return aws.EndpointDiscoveryUnset, false, nil - } - - return c.EnableEndpointDiscovery, true, nil -} - -// GetS3DisableMultiRegionAccessPoints returns if the S3 service should disable support for Multi-Region -// access-points. -func (c SharedConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) { - if c.S3DisableMultiRegionAccessPoints == nil { - return false, false, nil - } - - return *c.S3DisableMultiRegionAccessPoints, true, nil -} - -// GetRegion returns the region for the profile if a region is set. -func (c SharedConfig) getRegion(ctx context.Context) (string, bool, error) { - if len(c.Region) == 0 { - return "", false, nil - } - return c.Region, true, nil -} - -// GetCredentialsProvider returns the credentials for a profile if they were set. 
-func (c SharedConfig) getCredentialsProvider() (aws.Credentials, bool, error) { - return c.Credentials, true, nil -} - -// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. -func (c SharedConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { - if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { - return imds.EndpointModeStateUnset, false, nil - } - - return c.EC2IMDSEndpointMode, true, nil -} - -// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. -func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) { - if len(c.EC2IMDSEndpoint) == 0 { - return "", false, nil - } - - return c.EC2IMDSEndpoint, true, nil -} - -// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option -// resolver interface. -func (c SharedConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { - if c.EC2IMDSv1Disabled == nil { - return false, false - } - - return *c.EC2IMDSv1Disabled, true -} - -// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be -// used for requests. -func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { - if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { - return aws.DualStackEndpointStateUnset, false, nil - } - - return c.UseDualStackEndpoint, true, nil -} - -// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be -// used for requests. -func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { - if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { - return aws.FIPSEndpointStateUnset, false, nil - } - - return c.UseFIPSEndpoint, true, nil -} - -// GetS3DisableExpressAuth returns the configured value for -// [SharedConfig.S3DisableExpressAuth]. -func (c SharedConfig) GetS3DisableExpressAuth() (value, ok bool) { - if c.S3DisableExpressAuth == nil { - return false, false - } - - return *c.S3DisableExpressAuth, true -} - -// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was -func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) { - if len(c.CustomCABundle) == 0 { - return nil, false, nil - } - - b, err := ioutil.ReadFile(c.CustomCABundle) - if err != nil { - return nil, false, err - } - return bytes.NewReader(b), true, nil -} - -// getAppID returns the sdk app ID if set in shared config profile -func (c SharedConfig) getAppID(context.Context) (string, bool, error) { - return c.AppID, len(c.AppID) > 0, nil -} - -// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured -// endpoints feature. -func (c SharedConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) { - if c.IgnoreConfiguredEndpoints == nil { - return false, false, nil - } - - return *c.IgnoreConfiguredEndpoints, true, nil -} - -func (c SharedConfig) getBaseEndpoint(context.Context) (string, bool, error) { - return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil -} - -// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use -// with configured endpoints. 
-func (c SharedConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { - if service, ok := c.Services.ServiceValues[normalizeShared(sdkID)]; ok { - if endpt, ok := service[endpointURL]; ok { - return endpt, true, nil - } - } - return "", false, nil -} - -func normalizeShared(sdkID string) string { - lower := strings.ToLower(sdkID) - return strings.ReplaceAll(lower, " ", "_") -} - -func (c SharedConfig) getServicesObject(context.Context) (map[string]map[string]string, bool, error) { - return c.Services.ServiceValues, c.Services.ServiceValues != nil, nil -} - -// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the -// addition of ignoring when none of the files exist or when the profile -// is not found in any of the files. -func loadSharedConfigIgnoreNotExist(ctx context.Context, configs configs) (Config, error) { - cfg, err := loadSharedConfig(ctx, configs) - if err != nil { - if _, ok := err.(SharedConfigProfileNotExistError); ok { - return SharedConfig{}, nil - } - return nil, err - } - - return cfg, nil -} - -// loadSharedConfig uses the configs passed in to load the SharedConfig from file -// The file names and profile name are sourced from the configs. -// -// If profile name is not provided DefaultSharedConfigProfile (default) will -// be used. -// -// If shared config filenames are not provided DefaultSharedConfigFiles will -// be used. -// -// Config providers used: -// * sharedConfigProfileProvider -// * sharedConfigFilesProvider -func loadSharedConfig(ctx context.Context, configs configs) (Config, error) { - var profile string - var configFiles []string - var credentialsFiles []string - var ok bool - var err error - - profile, ok, err = getSharedConfigProfile(ctx, configs) - if err != nil { - return nil, err - } - if !ok { - profile = defaultSharedConfigProfile - } - - configFiles, ok, err = getSharedConfigFiles(ctx, configs) - if err != nil { - return nil, err - } - - credentialsFiles, ok, err = getSharedCredentialsFiles(ctx, configs) - if err != nil { - return nil, err - } - - // setup logger if log configuration warning is seti - var logger logging.Logger - logWarnings, found, err := getLogConfigurationWarnings(ctx, configs) - if err != nil { - return SharedConfig{}, err - } - if found && logWarnings { - logger, found, err = getLogger(ctx, configs) - if err != nil { - return SharedConfig{}, err - } - if !found { - logger = logging.NewStandardLogger(os.Stderr) - } - } - - return LoadSharedConfigProfile(ctx, profile, - func(o *LoadSharedConfigOptions) { - o.Logger = logger - o.ConfigFiles = configFiles - o.CredentialsFiles = credentialsFiles - }, - ) -} - -// LoadSharedConfigOptions struct contains optional values that can be used to load the config. -type LoadSharedConfigOptions struct { - - // CredentialsFiles are the shared credentials files - CredentialsFiles []string - - // ConfigFiles are the shared config files - ConfigFiles []string - - // Logger is the logger used to log shared config behavior - Logger logging.Logger -} - -// LoadSharedConfigProfile retrieves the configuration from the list of files -// using the profile provided. The order the files are listed will determine -// precedence. Values in subsequent files will overwrite values defined in -// earlier files. -// -// For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of A's. 
-// -// If config files are not set, SDK will default to using a file at location `.aws/config` if present. -// If credentials files are not set, SDK will default to using a file at location `.aws/credentials` if present. -// No default files are set, if files set to an empty slice. -// -// You can read more about shared config and credentials file location at -// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location -func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) { - var option LoadSharedConfigOptions - for _, fn := range optFns { - fn(&option) - } - - if option.ConfigFiles == nil { - option.ConfigFiles = DefaultSharedConfigFiles - } - - if option.CredentialsFiles == nil { - option.CredentialsFiles = DefaultSharedCredentialsFiles - } - - // load shared configuration sections from shared configuration INI options - configSections, err := loadIniFiles(option.ConfigFiles) - if err != nil { - return SharedConfig{}, err - } - - // check for profile prefix and drop duplicates or invalid profiles - err = processConfigSections(ctx, &configSections, option.Logger) - if err != nil { - return SharedConfig{}, err - } - - // load shared credentials sections from shared credentials INI options - credentialsSections, err := loadIniFiles(option.CredentialsFiles) - if err != nil { - return SharedConfig{}, err - } - - // check for profile prefix and drop duplicates or invalid profiles - err = processCredentialsSections(ctx, &credentialsSections, option.Logger) - if err != nil { - return SharedConfig{}, err - } - - err = mergeSections(&configSections, credentialsSections) - if err != nil { - return SharedConfig{}, err - } - - cfg := SharedConfig{} - profiles := map[string]struct{}{} - - if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil { - return SharedConfig{}, err - } - - return cfg, nil -} - -func processConfigSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { - skipSections := map[string]struct{}{} - - for _, section := range sections.List() { - if _, ok := skipSections[section]; ok { - continue - } - - // drop sections from config file that do not have expected prefixes. - switch { - case strings.HasPrefix(section, profilePrefix): - // Rename sections to remove "profile " prefixing to match with - // credentials file. If default is already present, it will be - // dropped. - newName, err := renameProfileSection(section, sections, logger) - if err != nil { - return fmt.Errorf("failed to rename profile section, %w", err) - } - skipSections[newName] = struct{}{} - - case strings.HasPrefix(section, ssoSectionPrefix): - case strings.HasPrefix(section, servicesPrefix): - case strings.EqualFold(section, "default"): - default: - // drop this section, as invalid profile name - sections.DeleteSection(section) - - if logger != nil { - logger.Logf(logging.Debug, "A profile defined with name `%v` is ignored. 
"+ - "For use within a shared configuration file, "+ - "a non-default profile must have `profile ` "+ - "prefixed to the profile name.", - section, - ) - } - } - } - return nil -} - -func renameProfileSection(section string, sections *ini.Sections, logger logging.Logger) (string, error) { - v, ok := sections.GetSection(section) - if !ok { - return "", fmt.Errorf("error processing profiles within the shared configuration files") - } - - // delete section with profile as prefix - sections.DeleteSection(section) - - // set the value to non-prefixed name in sections. - section = strings.TrimPrefix(section, profilePrefix) - if sections.HasSection(section) { - oldSection, _ := sections.GetSection(section) - v.Logs = append(v.Logs, - fmt.Sprintf("A non-default profile not prefixed with `profile ` found in %s, "+ - "overriding non-default profile from %s", - v.SourceFile, oldSection.SourceFile)) - sections.DeleteSection(section) - } - - // assign non-prefixed name to section - v.Name = section - sections.SetSection(section, v) - - return section, nil -} - -func processCredentialsSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { - for _, section := range sections.List() { - // drop profiles with prefix for credential files - if strings.HasPrefix(section, profilePrefix) { - // drop this section, as invalid profile name - sections.DeleteSection(section) - - if logger != nil { - logger.Logf(logging.Debug, - "The profile defined with name `%v` is ignored. A profile with the `profile ` prefix is invalid "+ - "for the shared credentials file.\n", - section, - ) - } - } - } - return nil -} - -func loadIniFiles(filenames []string) (ini.Sections, error) { - mergedSections := ini.NewSections() - - for _, filename := range filenames { - sections, err := ini.OpenFile(filename) - var v *ini.UnableToReadFile - if ok := errors.As(err, &v); ok { - // Skip files which can't be opened and read for whatever reason. - // We treat such files as empty, and do not fall back to other locations. 
- continue - } else if err != nil { - return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err} - } - - // mergeSections into mergedSections - err = mergeSections(&mergedSections, sections) - if err != nil { - return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err} - } - } - - return mergedSections, nil -} - -// mergeSections merges source section properties into destination section properties -func mergeSections(dst *ini.Sections, src ini.Sections) error { - for _, sectionName := range src.List() { - srcSection, _ := src.GetSection(sectionName) - - if (!srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey)) || - (srcSection.Has(accessKeyIDKey) && !srcSection.Has(secretAccessKey)) { - srcSection.Errors = append(srcSection.Errors, - fmt.Errorf("partial credentials found for profile %v", sectionName)) - } - - if !dst.HasSection(sectionName) { - dst.SetSection(sectionName, srcSection) - continue - } - - // merge with destination srcSection - dstSection, _ := dst.GetSection(sectionName) - - // errors should be overriden if any - dstSection.Errors = srcSection.Errors - - // Access key id update - if srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey) { - accessKey := srcSection.String(accessKeyIDKey) - secretKey := srcSection.String(secretAccessKey) - - if dstSection.Has(accessKeyIDKey) { - dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, accessKeyIDKey, - dstSection.SourceFile[accessKeyIDKey], srcSection.SourceFile[accessKeyIDKey])) - } - - // update access key - v, err := ini.NewStringValue(accessKey) - if err != nil { - return fmt.Errorf("error merging access key, %w", err) - } - dstSection.UpdateValue(accessKeyIDKey, v) - - // update secret key - v, err = ini.NewStringValue(secretKey) - if err != nil { - return fmt.Errorf("error merging secret key, %w", err) - } - dstSection.UpdateValue(secretAccessKey, v) - - // update session token - if err = mergeStringKey(&srcSection, &dstSection, sectionName, sessionTokenKey); err != nil { - return err - } - - // update source file to reflect where the static creds came from - dstSection.UpdateSourceFile(accessKeyIDKey, srcSection.SourceFile[accessKeyIDKey]) - dstSection.UpdateSourceFile(secretAccessKey, srcSection.SourceFile[secretAccessKey]) - } - - stringKeys := []string{ - roleArnKey, - sourceProfileKey, - credentialSourceKey, - externalIDKey, - mfaSerialKey, - roleSessionNameKey, - regionKey, - enableEndpointDiscoveryKey, - credentialProcessKey, - webIdentityTokenFileKey, - s3UseARNRegionKey, - s3DisableMultiRegionAccessPointsKey, - ec2MetadataServiceEndpointModeKey, - ec2MetadataServiceEndpointKey, - ec2MetadataV1DisabledKey, - useDualStackEndpoint, - useFIPSEndpointKey, - defaultsModeKey, - retryModeKey, - caBundleKey, - roleDurationSecondsKey, - retryMaxAttemptsKey, - - ssoSessionNameKey, - ssoAccountIDKey, - ssoRegionKey, - ssoRoleNameKey, - ssoStartURLKey, - } - for i := range stringKeys { - if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil { - return err - } - } - - // set srcSection on dst srcSection - *dst = dst.SetSection(sectionName, dstSection) - } - - return nil -} - -func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error { - if srcSection.Has(key) { - srcValue := srcSection.String(key) - val, err := ini.NewStringValue(srcValue) - if err != nil { - return fmt.Errorf("error merging %s, %w", key, err) - } - - if dstSection.Has(key) { - dstSection.Logs = 
append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key, - dstSection.SourceFile[key], srcSection.SourceFile[key])) - } - - dstSection.UpdateValue(key, val) - dstSection.UpdateSourceFile(key, srcSection.SourceFile[key]) - } - return nil -} - -func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string { - return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+ - "with a %v value found in a duplicate profile defined at file %v. \n", - sectionName, key, dstSourceFile, key, srcSourceFile) -} - -// Returns an error if all of the files fail to load. If at least one file is -// successfully loaded and contains the profile, no error will be returned. -func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile string, - sections ini.Sections, logger logging.Logger) error { - c.Profile = profile - - section, ok := sections.GetSection(profile) - if !ok { - return SharedConfigProfileNotExistError{ - Profile: profile, - } - } - - // if logs are appended to the section, log them - if section.Logs != nil && logger != nil { - for _, log := range section.Logs { - logger.Logf(logging.Debug, log) - } - } - - // set config from the provided INI section - err := c.setFromIniSection(profile, section) - if err != nil { - return fmt.Errorf("error fetching config from profile, %v, %w", profile, err) - } - - if _, ok := profiles[profile]; ok { - // if this is the second instance of the profile the Assume Role - // options must be cleared because they are only valid for the - // first reference of a profile. The self linked instance of the - // profile only have credential provider options. - c.clearAssumeRoleOptions() - } else { - // First time a profile has been seen. Assert if the credential type - // requires a role ARN, the ARN is also set - if err := c.validateCredentialsConfig(profile); err != nil { - return err - } - } - - // if not top level profile and has credentials, return with credentials. - if len(profiles) != 0 && c.Credentials.HasKeys() { - return nil - } - - profiles[profile] = struct{}{} - - // validate no colliding credentials type are present - if err := c.validateCredentialType(); err != nil { - return err - } - - // Link source profiles for assume roles - if len(c.SourceProfileName) != 0 { - // Linked profile via source_profile ignore credential provider - // options, the source profile must provide the credentials. - c.clearCredentialOptions() - - srcCfg := &SharedConfig{} - err := srcCfg.setFromIniSections(profiles, c.SourceProfileName, sections, logger) - if err != nil { - // SourceProfileName that doesn't exist is an error in configuration. - if _, ok := err.(SharedConfigProfileNotExistError); ok { - err = SharedConfigAssumeRoleError{ - RoleARN: c.RoleARN, - Profile: c.SourceProfileName, - Err: err, - } - } - return err - } - - if !srcCfg.hasCredentials() { - return SharedConfigAssumeRoleError{ - RoleARN: c.RoleARN, - Profile: c.SourceProfileName, - } - } - - c.Source = srcCfg - } - - // If the profile contains an SSO session parameter, the session MUST exist - // as a section in the config file. Load the SSO session using the name - // provided. If the session section is not found or incomplete an error - // will be returned. 
- if c.hasSSOTokenProviderConfiguration() { - section, ok := sections.GetSection(ssoSectionPrefix + strings.TrimSpace(c.SSOSessionName)) - if !ok { - return fmt.Errorf("failed to find SSO session section, %v", c.SSOSessionName) - } - var ssoSession SSOSession - ssoSession.setFromIniSection(section) - ssoSession.Name = c.SSOSessionName - c.SSOSession = &ssoSession - } - - if len(c.ServicesSectionName) > 0 { - if section, ok := sections.GetSection(servicesPrefix + c.ServicesSectionName); ok { - var svcs Services - svcs.setFromIniSection(section) - c.Services = svcs - } - } - - return nil -} - -// setFromIniSection loads the configuration from the profile section defined in -// the provided INI file. A SharedConfig pointer type value is used so that -// multiple config file loadings can be chained. -// -// Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For example -// if a config file only includes aws_access_key_id but no aws_secret_access_key -// the aws_access_key_id will be ignored. -func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) error { - if len(section.Name) == 0 { - sources := make([]string, 0) - for _, v := range section.SourceFile { - sources = append(sources, v) - } - - return fmt.Errorf("parsing error : could not find profile section name after processing files: %v", sources) - } - - if len(section.Errors) != 0 { - var errStatement string - for i, e := range section.Errors { - errStatement = fmt.Sprintf("%d, %v\n", i+1, e.Error()) - } - return fmt.Errorf("Error using profile: \n %v", errStatement) - } - - // Assume Role - updateString(&c.RoleARN, section, roleArnKey) - updateString(&c.ExternalID, section, externalIDKey) - updateString(&c.MFASerial, section, mfaSerialKey) - updateString(&c.RoleSessionName, section, roleSessionNameKey) - updateString(&c.SourceProfileName, section, sourceProfileKey) - updateString(&c.CredentialSource, section, credentialSourceKey) - updateString(&c.Region, section, regionKey) - - // AWS Single Sign-On (AWS SSO) - // SSO session options - updateString(&c.SSOSessionName, section, ssoSessionNameKey) - - // Legacy SSO session options - updateString(&c.SSORegion, section, ssoRegionKey) - updateString(&c.SSOStartURL, section, ssoStartURLKey) - - // SSO fields not used - updateString(&c.SSOAccountID, section, ssoAccountIDKey) - updateString(&c.SSORoleName, section, ssoRoleNameKey) - - // we're retaining a behavioral quirk with this field that existed before - // the removal of literal parsing for #2276: - // - if the key is missing, the config field will not be set - // - if the key is set to a non-numeric, the config field will be set to 0 - if section.Has(roleDurationSecondsKey) { - if v, ok := section.Int(roleDurationSecondsKey); ok { - c.RoleDurationSeconds = aws.Duration(time.Duration(v) * time.Second) - } else { - c.RoleDurationSeconds = aws.Duration(time.Duration(0)) - } - } - - updateString(&c.CredentialProcess, section, credentialProcessKey) - updateString(&c.WebIdentityTokenFile, section, webIdentityTokenFileKey) - - updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) - updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey) - updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey) - updateBoolPtr(&c.S3DisableExpressAuth, section, s3DisableExpressSessionAuthKey) - - if err := 
updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err) - } - updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) - updateBoolPtr(&c.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey) - - updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint) - updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey) - - if err := updateDefaultsMode(&c.DefaultsMode, section, defaultsModeKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", defaultsModeKey, err) - } - - if err := updateInt(&c.RetryMaxAttempts, section, retryMaxAttemptsKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", retryMaxAttemptsKey, err) - } - if err := updateRetryMode(&c.RetryMode, section, retryModeKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", retryModeKey, err) - } - - updateString(&c.CustomCABundle, section, caBundleKey) - - // user agent app ID added to request User-Agent header - updateString(&c.AppID, section, sdkAppID) - - updateBoolPtr(&c.IgnoreConfiguredEndpoints, section, ignoreConfiguredEndpoints) - - updateString(&c.BaseEndpoint, section, endpointURL) - - if err := updateDisableRequestCompression(&c.DisableRequestCompression, section, disableRequestCompression); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", disableRequestCompression, err) - } - if err := updateRequestMinCompressSizeBytes(&c.RequestMinCompressSizeBytes, section, requestMinCompressionSizeBytes); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err) - } - - // Shared Credentials - creds := aws.Credentials{ - AccessKeyID: section.String(accessKeyIDKey), - SecretAccessKey: section.String(secretAccessKey), - SessionToken: section.String(sessionTokenKey), - Source: fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]), - } - - if creds.HasKeys() { - c.Credentials = creds - } - - updateString(&c.ServicesSectionName, section, servicesSectionKey) - - return nil -} - -func updateRequestMinCompressSizeBytes(bytes **int64, sec ini.Section, key string) error { - if !sec.Has(key) { - return nil - } - - v, ok := sec.Int(key) - if !ok { - return fmt.Errorf("invalid value for min request compression size bytes %s, need int64", sec.String(key)) - } - if v < 0 || v > smithyrequestcompression.MaxRequestMinCompressSizeBytes { - return fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", v) - } - *bytes = new(int64) - **bytes = v - return nil -} - -func updateDisableRequestCompression(disable **bool, sec ini.Section, key string) error { - if !sec.Has(key) { - return nil - } - - v := sec.String(key) - switch { - case v == "true": - *disable = new(bool) - **disable = true - case v == "false": - *disable = new(bool) - **disable = false - default: - return fmt.Errorf("invalid value for shared config profile field, %s=%s, need true or false", key, v) - } - return nil -} - -func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { - if c.RequestMinCompressSizeBytes == nil { - return 0, false, nil - } - return *c.RequestMinCompressSizeBytes, true, nil -} - -func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, bool, error) { 
- if c.DisableRequestCompression == nil { - return false, false, nil - } - return *c.DisableRequestCompression, true, nil -} - -func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error { - if !section.Has(key) { - return nil - } - value := section.String(key) - if ok := mode.SetFromString(value); !ok { - return fmt.Errorf("invalid value: %s", value) - } - return nil -} - -func updateRetryMode(mode *aws.RetryMode, section ini.Section, key string) (err error) { - if !section.Has(key) { - return nil - } - value := section.String(key) - if *mode, err = aws.ParseRetryMode(value); err != nil { - return err - } - return nil -} - -func updateEC2MetadataServiceEndpointMode(endpointMode *imds.EndpointModeState, section ini.Section, key string) error { - if !section.Has(key) { - return nil - } - value := section.String(key) - return endpointMode.SetFromString(value) -} - -func (c *SharedConfig) validateCredentialsConfig(profile string) error { - if err := c.validateCredentialsRequireARN(profile); err != nil { - return err - } - - return nil -} - -func (c *SharedConfig) validateCredentialsRequireARN(profile string) error { - var credSource string - - switch { - case len(c.SourceProfileName) != 0: - credSource = sourceProfileKey - case len(c.CredentialSource) != 0: - credSource = credentialSourceKey - case len(c.WebIdentityTokenFile) != 0: - credSource = webIdentityTokenFileKey - } - - if len(credSource) != 0 && len(c.RoleARN) == 0 { - return CredentialRequiresARNError{ - Type: credSource, - Profile: profile, - } - } - - return nil -} - -func (c *SharedConfig) validateCredentialType() error { - // Only one or no credential type can be defined. - if !oneOrNone( - len(c.SourceProfileName) != 0, - len(c.CredentialSource) != 0, - len(c.CredentialProcess) != 0, - len(c.WebIdentityTokenFile) != 0, - ) { - return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token") - } - - return nil -} - -func (c *SharedConfig) validateSSOConfiguration() error { - if c.hasSSOTokenProviderConfiguration() { - err := c.validateSSOTokenProviderConfiguration() - if err != nil { - return err - } - return nil - } - - if c.hasLegacySSOConfiguration() { - err := c.validateLegacySSOConfiguration() - if err != nil { - return err - } - } - return nil -} - -func (c *SharedConfig) validateSSOTokenProviderConfiguration() error { - var missing []string - - if len(c.SSOSessionName) == 0 { - missing = append(missing, ssoSessionNameKey) - } - - if c.SSOSession == nil { - missing = append(missing, ssoSectionPrefix) - } else { - if len(c.SSOSession.SSORegion) == 0 { - missing = append(missing, ssoRegionKey) - } - - if len(c.SSOSession.SSOStartURL) == 0 { - missing = append(missing, ssoStartURLKey) - } - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - c.Profile, strings.Join(missing, ", ")) - } - - if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion { - return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix) - } - - if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL { - return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURLKey, c.Profile, ssoStartURLKey, ssoSectionPrefix) - } - - return nil -} - -func (c *SharedConfig) validateLegacySSOConfiguration() error { - var missing []string - - if len(c.SSORegion) == 0 { - missing = append(missing, 
ssoRegionKey) - } - - if len(c.SSOStartURL) == 0 { - missing = append(missing, ssoStartURLKey) - } - - if len(c.SSOAccountID) == 0 { - missing = append(missing, ssoAccountIDKey) - } - - if len(c.SSORoleName) == 0 { - missing = append(missing, ssoRoleNameKey) - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - c.Profile, strings.Join(missing, ", ")) - } - return nil -} - -func (c *SharedConfig) hasCredentials() bool { - switch { - case len(c.SourceProfileName) != 0: - case len(c.CredentialSource) != 0: - case len(c.CredentialProcess) != 0: - case len(c.WebIdentityTokenFile) != 0: - case c.hasSSOConfiguration(): - case c.Credentials.HasKeys(): - default: - return false - } - - return true -} - -func (c *SharedConfig) hasSSOConfiguration() bool { - return c.hasSSOTokenProviderConfiguration() || c.hasLegacySSOConfiguration() -} - -func (c *SharedConfig) hasSSOTokenProviderConfiguration() bool { - return len(c.SSOSessionName) > 0 -} - -func (c *SharedConfig) hasLegacySSOConfiguration() bool { - return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0 -} - -func (c *SharedConfig) clearAssumeRoleOptions() { - c.RoleARN = "" - c.ExternalID = "" - c.MFASerial = "" - c.RoleSessionName = "" - c.SourceProfileName = "" -} - -func (c *SharedConfig) clearCredentialOptions() { - c.CredentialSource = "" - c.CredentialProcess = "" - c.WebIdentityTokenFile = "" - c.Credentials = aws.Credentials{} - c.SSOAccountID = "" - c.SSORegion = "" - c.SSORoleName = "" - c.SSOStartURL = "" -} - -// SharedConfigLoadError is an error for the shared config file failed to load. -type SharedConfigLoadError struct { - Filename string - Err error -} - -// Unwrap returns the underlying error that caused the failure. -func (e SharedConfigLoadError) Unwrap() error { - return e.Err -} - -func (e SharedConfigLoadError) Error() string { - return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err) -} - -// SharedConfigProfileNotExistError is an error for the shared config when -// the profile was not find in the config file. -type SharedConfigProfileNotExistError struct { - Filename []string - Profile string - Err error -} - -// Unwrap returns the underlying error that caused the failure. -func (e SharedConfigProfileNotExistError) Unwrap() error { - return e.Err -} - -func (e SharedConfigProfileNotExistError) Error() string { - return fmt.Sprintf("failed to get shared config profile, %s", e.Profile) -} - -// SharedConfigAssumeRoleError is an error for the shared config when the -// profile contains assume role information, but that information is invalid -// or not complete. -type SharedConfigAssumeRoleError struct { - Profile string - RoleARN string - Err error -} - -// Unwrap returns the underlying error that caused the failure. -func (e SharedConfigAssumeRoleError) Unwrap() error { - return e.Err -} - -func (e SharedConfigAssumeRoleError) Error() string { - return fmt.Sprintf("failed to load assume role %s, of profile %s, %v", - e.RoleARN, e.Profile, e.Err) -} - -// CredentialRequiresARNError provides the error for shared config credentials -// that are incorrectly configured in the shared config or credentials file. -type CredentialRequiresARNError struct { - // type of credentials that were configured. - Type string - - // Profile name the credentials were in. - Profile string -} - -// Error satisfies the error interface. 
-func (e CredentialRequiresARNError) Error() string { - return fmt.Sprintf( - "credential type %s requires role_arn, profile %s", - e.Type, e.Profile, - ) -} - -func oneOrNone(bs ...bool) bool { - var count int - - for _, b := range bs { - if b { - count++ - if count > 1 { - return false - } - } - } - - return true -} - -// updateString will only update the dst with the value in the section key, key -// is present in the section. -func updateString(dst *string, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.String(key) -} - -// updateInt will only update the dst with the value in the section key, key -// is present in the section. -// -// Down casts the INI integer value from a int64 to an int, which could be -// different bit size depending on platform. -func updateInt(dst *int, section ini.Section, key string) error { - if !section.Has(key) { - return nil - } - - v, ok := section.Int(key) - if !ok { - return fmt.Errorf("invalid value %s=%s, expect integer", key, section.String(key)) - } - - *dst = int(v) - return nil -} - -// updateBool will only update the dst with the value in the section key, key -// is present in the section. -func updateBool(dst *bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - - // retains pre-#2276 behavior where non-bool value would resolve to false - v, _ := section.Bool(key) - *dst = v -} - -// updateBoolPtr will only update the dst with the value in the section key, -// key is present in the section. -func updateBoolPtr(dst **bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - - // retains pre-#2276 behavior where non-bool value would resolve to false - v, _ := section.Bool(key) - *dst = new(bool) - **dst = v -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. -func updateEndpointDiscoveryType(dst *aws.EndpointDiscoveryEnableState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - value := section.String(key) - if len(value) == 0 { - return - } - - switch { - case strings.EqualFold(value, endpointDiscoveryDisabled): - *dst = aws.EndpointDiscoveryDisabled - case strings.EqualFold(value, endpointDiscoveryEnabled): - *dst = aws.EndpointDiscoveryEnabled - case strings.EqualFold(value, endpointDiscoveryAuto): - *dst = aws.EndpointDiscoveryAuto - } -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. -func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - // retains pre-#2276 behavior where non-bool value would resolve to false - if v, _ := section.Bool(key); v { - *dst = aws.DualStackEndpointStateEnabled - } else { - *dst = aws.DualStackEndpointStateDisabled - } - - return -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. 
-func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - // retains pre-#2276 behavior where non-bool value would resolve to false - if v, _ := section.Bool(key); v { - *dst = aws.FIPSEndpointStateEnabled - } else { - *dst = aws.FIPSEndpointStateDisabled - } - - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md deleted file mode 100644 index 399f089697..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ /dev/null @@ -1,519 +0,0 @@ -# v1.17.9 (2024-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.8 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.7 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2024-03-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2024-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2024-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.16 (2024-01-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.15 (2024-01-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.14 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.13 (2023-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.12 (2023-12-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.11 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.10 (2023-12-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.9 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.8 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.7 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2023-11-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.4 (2023-11-21) - -* **Bug Fix**: Don't expect error responses to have a JSON payload in the endpointcreds provider. - -# v1.16.3 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2023-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2023-11-14) - -* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider. 
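The `shared_config.go` file removed above is the vendored loader behind the SDK's profile handling: it merges `~/.aws/config` and `~/.aws/credentials`, strips the `profile ` prefix, and resolves assume-role chains. A minimal sketch of how a consumer exercises that code path directly (the `dev` profile name is illustrative; canonical `github.com` import paths are shown rather than this repo's `github.com` vendor mirror):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// LoadSharedConfigProfile parses the default shared config and
	// credentials files and returns the merged SharedConfig for "dev".
	sc, err := config.LoadSharedConfigProfile(context.Background(), "dev")
	if err != nil {
		log.Fatal(err) // e.g. SharedConfigProfileNotExistError
	}
	fmt.Println(sc.Region, sc.RoleARN)
}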
- -# v1.15.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-11-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.43 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.42 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.41 (2023-10-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.40 (2023-09-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.39 (2023-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.38 (2023-09-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.37 (2023-09-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.36 (2023-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.35 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.34 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.33 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.32 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.31 (2023-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.30 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.29 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.28 (2023-07-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.27 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.26 (2023-06-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.25 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.24 (2023-05-09) - -* No change notes available for this release. 
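Earlier in this diff, the deleted resolver wraps every resolved provider in an `aws.CredentialsCache`, using a five-minute `ExpiryWindow` in one of its resolution paths. A sketch of the same caching pattern in user code, assuming a static provider purely for illustration:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
)

func main() {
	// Static keys are placeholders; the resolver normally sources these
	// from the environment or the shared config files.
	static := credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")

	cached := aws.NewCredentialsCache(static, func(o *aws.CredentialsCacheOptions) {
		// Refresh up to five minutes before expiry, mirroring the
		// wrapWithCredentialsCache call in the deleted resolver.
		o.ExpiryWindow = 5 * time.Minute
	})

	creds, err := cached.Retrieve(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(creds.Source)
}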
- -# v1.13.23 (2023-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.22 (2023-05-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.21 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.20 (2023-04-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.19 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.18 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.17 (2023-03-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.16 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.15 (2023-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.14 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.13 (2023-02-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.12 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.11 (2023-02-01) - -* No change notes available for this release. - -# v1.13.10 (2023-01-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2023-01-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2023-01-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2022-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2022-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.5 (2022-12-15) - -* **Bug Fix**: Unify logic between shared config and in finding home directory -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2022-11-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2022-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2022-11-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-11-11) - -* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. 
This has been fixed via PR #1903 and tracked in issue #1846 -* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider - -# v1.12.24 (2022-11-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.23 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.22 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.21 (2022-09-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.20 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.19 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.18 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.17 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.16 (2022-08-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.15 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.14 (2022-08-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.11 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.9 (2022-07-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2022-06-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.5 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2022-05-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.3 (2022-05-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.1 (2022-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-04-25) - -* **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.2 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-03-23) - -* **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for CredentialsCache's new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy.
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-02-24) - -* **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575) -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.5 (2021-12-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.4 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.3 (2021-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.2 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.1 (2021-11-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.3 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.2 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-09-10) - -* **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders. - -# v1.4.0 (2021-08-27) - -* **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider. Closes https://github.com/aws/aws-sdk-go-v2/issues/723 -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. 
-* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go deleted file mode 100644 index f6e2873ab9..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package credentials provides types for retrieving credentials from credentials sources. -*/ -package credentials diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go deleted file mode 100644 index 6ed71b42b2..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go +++ /dev/null @@ -1,58 +0,0 @@ -// Package ec2rolecreds provides the credentials provider implementation for -// retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS. -// -// # Concurrency and caching -// -// The Provider is not safe to be used concurrently, and does not provide any -// caching of credentials retrieved. You should wrap the Provider with an -// `aws.CredentialsCache` to provide concurrency safety, and caching of -// credentials. -// -// # Loading credentials with the SDK's AWS Config -// -// The EC2 Instance role credentials provider will automatically be the resolved -// credential provider in the credential chain if no other credential provider is -// resolved first. -// -// To explicitly instruct the SDK's credential resolution to use the EC2 Instance -// role for credentials, you specify a `credential_source` property in the config -// profile the SDK will load. -// -// [default] -// credential_source = Ec2InstanceMetadata -// -// # Loading credentials with the Provider directly -// -// Another way to use the EC2 Instance role credentials provider is to create it -// directly and assign it as the credentials provider for an API client. -// -// The following example creates an EC2 Instance role credentials provider, and wraps -// it with the CredentialsCache before assigning the provider to the Amazon S3 API -// client's Credentials option. -// -// provider := ec2rolecreds.New() -// -// // Create the service client value configured for credentials. -// svc := s3.New(s3.Options{ -// Credentials: aws.NewCredentialsCache(provider), -// }) -// -// If you need more control, you can set the configuration options on the -// credentials provider using the imds.Options type to configure the EC2 IMDS -// API Client and ExpiryWindow of the retrieved credentials. -// -// provider := ec2rolecreds.New(func(o *ec2rolecreds.Options) { -// // See imds.Options type's documentation for more options available.
-// o.Client = imds.New(imds.Options{ -// HTTPClient: customHTTPClient, -// }) -// }) -// -// // Modify how soon credentials expire prior to their original expiry time -// // by setting the wrapping aws.CredentialsCache's ExpiryWindow. -// cache := aws.NewCredentialsCache(provider, func(o *aws.CredentialsCacheOptions) { -// o.ExpiryWindow = 5 * time.Minute -// }) -// -// # EC2 IMDS API Client -// -// See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on -// configuring the client, and options available. -package ec2rolecreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go deleted file mode 100644 index 5c699f1665..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go +++ /dev/null @@ -1,229 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "math" - "path" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// ProviderName provides the name of the EC2 Role provider -const ProviderName = "EC2RoleProvider" - -// GetMetadataAPIClient provides the interface for an EC2 IMDS API client for the -// GetMetadata operation. -type GetMetadataAPIClient interface { - GetMetadata(context.Context, *imds.GetMetadataInput, ...func(*imds.Options)) (*imds.GetMetadataOutput, error) -} - -// A Provider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// The New function must be used to create the Provider with a custom EC2 IMDS client. -// -// p := ec2rolecreds.New(func(o *ec2rolecreds.Options) { -// o.Client = imds.New(imds.Options{/* custom options */}) -// }) -type Provider struct { - options Options -} - -// Options is a list of user settable options for setting the behavior of the Provider. -type Options struct { - // The API client that will be used by the provider to make GetMetadata API - // calls to EC2 IMDS. - // - // If nil, the provider will default to the EC2 IMDS client. - Client GetMetadataAPIClient -} - -// New returns an initialized Provider value configured to retrieve -// credentials from EC2 Instance Metadata service. -func New(optFns ...func(*Options)) *Provider { - options := Options{} - - for _, fn := range optFns { - fn(&options) - } - - if options.Client == nil { - options.Client = imds.New(imds.Options{}) - } - - return &Provider{ - options: options, - } -} - -// Retrieve retrieves credentials from the EC2 service. An error will be returned -// if the request fails, or if the desired credentials cannot be extracted.
-func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - credsList, err := requestCredList(ctx, p.options.Client) - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - if len(credsList) == 0 { - return aws.Credentials{Source: ProviderName}, - fmt.Errorf("unexpected empty EC2 IMDS role list") - } - credsName := credsList[0] - - roleCreds, err := requestCred(ctx, p.options.Client, credsName) - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - creds := aws.Credentials{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - Source: ProviderName, - - CanExpire: true, - Expires: roleCreds.Expiration, - } - - // Cap role credentials Expires to 1 hour so they can be refreshed more - // often. Jitter will be applied by the credentials cache if being used. - if anHour := sdk.NowTime().Add(1 * time.Hour); creds.Expires.After(anHour) { - creds.Expires = anHour - } - - return creds, nil -} - -// HandleFailToRefresh will extend the credentials Expires time if it is -// expired. If the credentials will not expire within the minimum time, they -// will be returned. -// -// If the credentials cannot expire, the original error will be returned. -func (p *Provider) HandleFailToRefresh(ctx context.Context, prevCreds aws.Credentials, err error) ( - aws.Credentials, error, -) { - if !prevCreds.CanExpire { - return aws.Credentials{}, err - } - - if prevCreds.Expires.After(sdk.NowTime().Add(5 * time.Minute)) { - return prevCreds, nil - } - - newCreds := prevCreds - randFloat64, err := sdkrand.CryptoRandFloat64() - if err != nil { - return aws.Credentials{}, fmt.Errorf("failed to get random float, %w", err) - } - - // Random distribution of [5,15) minutes. - expireOffset := time.Duration(randFloat64*float64(10*time.Minute)) + 5*time.Minute - newCreds.Expires = sdk.NowTime().Add(expireOffset) - - logger := middleware.GetLogger(ctx) - logger.Logf(logging.Warn, "Attempting credential expiration extension due to a credential service availability issue. A refresh of these credentials will be attempted again in %v minutes.", math.Floor(expireOffset.Minutes())) - - return newCreds, nil -} - -// AdjustExpiresBy will add the passed in duration to the passed in -// credential's Expires time, unless the time until Expires is less than 15 -// minutes. Returns the credentials, even if not updated. -func (p *Provider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) ( - aws.Credentials, error, -) { - if !creds.CanExpire { - return creds, nil - } - if creds.Expires.Before(sdk.NowTime().Add(15 * time.Minute)) { - return creds, nil - } - - creds.Expires = creds.Expires.Add(dur) - return creds, nil -} - -// ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "/iam/security-credentials/"
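As an editorial aside (not part of the vendored code being removed here): HandleFailToRefresh and AdjustExpiresBy above are not called by user code directly. aws.CredentialsCache discovers them through the optional strategy interfaces named in the v1.11.0 changelog entry, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy. A minimal sketch of that wiring, assuming the aws package at the version vendored here:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
)

func main() {
    p := ec2rolecreds.New()

    // Compile-time assertions: *Provider satisfies both optional cache
    // strategy interfaces, so wrapping it in aws.NewCredentialsCache
    // enables the fail-refresh and adjust-expiry behavior automatically.
    var _ aws.HandleFailRefreshCredentialsCacheStrategy = p
    var _ aws.AdjustExpiresByCredentialsCacheStrategy = p

    cache := aws.NewCredentialsCache(p)
    fmt.Printf("wrapped provider: %T\n", cache)
}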
-// requestCredList requests a list of credentials from the EC2 service. If -// there are no credentials, or there is an error making or receiving the -// request, an error will be returned. -func requestCredList(ctx context.Context, client GetMetadataAPIClient) ([]string, error) { - resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{ - Path: iamSecurityCredsPath, - }) - if err != nil { - return nil, fmt.Errorf("no EC2 IMDS role found, %w", err) - } - defer resp.Content.Close() - - credsList := []string{} - s := bufio.NewScanner(resp.Content) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to read EC2 IMDS role, %w", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific role from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response, -// an error will be returned. -func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{ - Path: path.Join(iamSecurityCredsPath, credsName), - }) - if err != nil { - return ec2RoleCredRespBody{}, - fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w", - credsName, err) - } - defer resp.Content.Close() - - var respCreds ec2RoleCredRespBody - if err := json.NewDecoder(resp.Content).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - fmt.Errorf("failed to decode %s EC2 IMDS role credentials, %w", - credsName, err) - } - - if !strings.EqualFold(respCreds.Code, "Success") { - // If an error code was returned, something failed requesting the role. - return ec2RoleCredRespBody{}, - fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w", - credsName, - &smithy.GenericAPIError{Code: respCreds.Code, Message: respCreds.Message}) - } - - return respCreds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go deleted file mode 100644 index c3f5dadcec..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go +++ /dev/null @@ -1,48 +0,0 @@ -package client - -import ( - "context" - "github.com/aws/smithy-go/middleware" -) - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} - -type signRequestMiddleware struct { -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} diff --git
a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go deleted file mode 100644 index 9a869f8954..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go +++ /dev/null @@ -1,164 +0,0 @@ -package client - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - "github.com/aws/smithy-go" - smithymiddleware "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ServiceID is the client identifier -const ServiceID = "endpoint-credentials" - -// HTTPClient is a client for sending HTTP requests -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Options is the endpoint client configurable options -type Options struct { - // The endpoint to retrieve credentials from - Endpoint string - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. - Retryer aws.Retryer - - // Set of options to modify how the credentials operation is invoked. - APIOptions []func(*smithymiddleware.Stack) error -} - -// Copy creates a copy of the API options. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*smithymiddleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - return to -} - -// Client is a client for retrieving AWS credentials from an endpoint -type Client struct { - options Options -} - -// New constructs a new Client from the given options -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - if options.HTTPClient == nil { - options.HTTPClient = awshttp.NewBuildableClient() - } - - if options.Retryer == nil { - // Amazon-owned implementations of this endpoint are known to sometimes - // return plaintext responses (i.e. no Code) like normal, so add a few - // additional status codes - options.Retryer = retry.NewStandard(func(o *retry.StandardOptions) { - o.Retryables = append(o.Retryables, retry.RetryableHTTPStatusCode{ - Codes: map[int]struct{}{ - http.StatusTooManyRequests: {}, - }, - }) - }) - } - - for _, fn := range optFns { - fn(&options) - } - - client := &Client{ - options: options, - } - - return client -} - -// GetCredentialsInput is the input to send with the endpoint service to receive credentials.
-type GetCredentialsInput struct { - AuthorizationToken string -} - -// GetCredentials retrieves credentials from the credential endpoint -func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) { - stack := smithymiddleware.NewStack("GetCredentials", smithyhttp.NewStackRequest) - options := c.options.Copy() - for _, fn := range optFns { - fn(&options) - } - - stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After) - stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After) - stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After) - addProtocolFinalizerMiddlewares(stack, options, "GetCredentials") - retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer}) - middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID) - smithyhttp.AddErrorCloseResponseBodyMiddleware(stack) - smithyhttp.AddCloseResponseBodyMiddleware(stack) - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, err - } - } - - handler := smithymiddleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, _, err := handler.Handle(ctx, params) - if err != nil { - return nil, err - } - - return result.(*GetCredentialsOutput), err -} - -// GetCredentialsOutput is the response from the credential endpoint -type GetCredentialsOutput struct { - Expiration *time.Time - AccessKeyID string - SecretAccessKey string - Token string -} - -// EndpointError is an error returned from the endpoint service -type EndpointError struct { - Code string `json:"code"` - Message string `json:"message"` - Fault smithy.ErrorFault `json:"-"` - statusCode int `json:"-"` -} - -// Error is the error message string -func (e *EndpointError) Error() string { - return fmt.Sprintf("%s: %s", e.Code, e.Message) -} - -// ErrorCode is the error code returned by the endpoint -func (e *EndpointError) ErrorCode() string { - return e.Code -} - -// ErrorMessage is the error message returned by the endpoint -func (e *EndpointError) ErrorMessage() string { - return e.Message -} - -// ErrorFault indicates error fault classification -func (e *EndpointError) ErrorFault() smithy.ErrorFault { - return e.Fault -} - -// HTTPStatusCode implements retry.HTTPStatusCode.
-func (e *EndpointError) HTTPStatusCode() int { - return e.statusCode -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go deleted file mode 100644 index 748ee67244..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go +++ /dev/null @@ -1,20 +0,0 @@ -package client - -import ( - "context" - "github.com/aws/smithy-go/middleware" -) - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go deleted file mode 100644 index f2820d20ea..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go +++ /dev/null @@ -1,164 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/url" - - "github.com/aws/smithy-go" - smithymiddleware "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -type buildEndpoint struct { - Endpoint string -} - -func (b *buildEndpoint) ID() string { - return "BuildEndpoint" -} - -func (b *buildEndpoint) HandleBuild(ctx context.Context, in smithymiddleware.BuildInput, next smithymiddleware.BuildHandler) ( - out smithymiddleware.BuildOutput, metadata smithymiddleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport, %T", in.Request) - } - - if len(b.Endpoint) == 0 { - return out, metadata, fmt.Errorf("endpoint not provided") - } - - parsed, err := url.Parse(b.Endpoint) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint, %w", err) - } - - request.URL = parsed - - return next.HandleBuild(ctx, in) -} - -type serializeOpGetCredential struct{} - -func (s *serializeOpGetCredential) ID() string { - return "OperationSerializer" -} - -func (s *serializeOpGetCredential) HandleSerialize(ctx context.Context, in smithymiddleware.SerializeInput, next smithymiddleware.SerializeHandler) ( - out smithymiddleware.SerializeOutput, metadata smithymiddleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type, %T", in.Request) - } - - params, ok := in.Parameters.(*GetCredentialsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters, %T", in.Parameters) - } - - const acceptHeader = "Accept" - request.Header[acceptHeader] = append(request.Header[acceptHeader][:0], "application/json") - - if len(params.AuthorizationToken) > 0 { - const authHeader = "Authorization" - request.Header[authHeader] = append(request.Header[authHeader][:0], params.AuthorizationToken) - } - - return next.HandleSerialize(ctx, in) -} - -type deserializeOpGetCredential struct{} - -func (d *deserializeOpGetCredential) ID() string { - return 
"OperationDeserializer" -} - -func (d *deserializeOpGetCredential) HandleDeserialize(ctx context.Context, in smithymiddleware.DeserializeInput, next smithymiddleware.DeserializeHandler) ( - out smithymiddleware.DeserializeOutput, metadata smithymiddleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, deserializeError(response) - } - - var shape *GetCredentialsOutput - if err = json.NewDecoder(response.Body).Decode(&shape); err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize json response, %w", err)} - } - - out.Result = shape - return out, metadata, err -} - -func deserializeError(response *smithyhttp.Response) error { - // we could be talking to anything, json isn't guaranteed - // see https://github.com/aws/aws-sdk-go-v2/issues/2316 - if response.Header.Get("Content-Type") == "application/json" { - return deserializeJSONError(response) - } - - msg, err := io.ReadAll(response.Body) - if err != nil { - return &smithy.DeserializationError{ - Err: fmt.Errorf("read response, %w", err), - } - } - - return &EndpointError{ - // no sensible value for Code - Message: string(msg), - Fault: stof(response.StatusCode), - statusCode: response.StatusCode, - } -} - -func deserializeJSONError(response *smithyhttp.Response) error { - var errShape *EndpointError - if err := json.NewDecoder(response.Body).Decode(&errShape); err != nil { - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode error message, %w", err), - } - } - - errShape.Fault = stof(response.StatusCode) - errShape.statusCode = response.StatusCode - return errShape -} - -// maps HTTP status code to smithy ErrorFault -func stof(code int) smithy.ErrorFault { - if code >= 500 { - return smithy.FaultServer - } - return smithy.FaultClient -} - -func addProtocolFinalizerMiddlewares(stack *smithymiddleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, smithymiddleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", smithymiddleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %w", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", smithymiddleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %w", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", smithymiddleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go deleted file mode 100644 index 0c3c4d6826..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go +++ /dev/null @@ -1,192 +0,0 @@ -// Package endpointcreds provides support for retrieving credentials from an -// arbitrary HTTP endpoint. 
-// -// The credentials endpoint Provider can receive both static and refreshable -// credentials that will expire. Credentials are static when an "Expiration" -// value is not provided in the endpoint's response. -// -// Static credentials will never expire once they have been retrieved. The format -// of the static credentials response: -// -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om...." -// } -// -// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration -// value in the response. The format of the refreshable credentials response: -// -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// "Token" : "AQoDY....=", -// "Expiration" : "2016-02-25T06:03:31Z" -// } -// -// Errors should be returned in the following format and only returned with 400 -// or 500 HTTP status codes. -// -// { -// "code": "ErrorCode", -// "message": "Helpful error message." -// } -package endpointcreds - -import ( - "context" - "fmt" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client" - "github.com/aws/smithy-go/middleware" -) - -// ProviderName is the name of the credentials provider. -const ProviderName = `CredentialsEndpointProvider` - -type getCredentialsAPIClient interface { - GetCredentials(context.Context, *client.GetCredentialsInput, ...func(*client.Options)) (*client.GetCredentialsOutput, error) -} - -// Provider satisfies the aws.CredentialsProvider interface, and is a client to -// retrieve credentials from an arbitrary endpoint. -type Provider struct { - // The AWS Client to make HTTP requests to the endpoint with. The endpoint - // the request will be made to is provided by the aws.Config's - // EndpointResolver. - client getCredentialsAPIClient - - options Options -} - -// HTTPClient is a client for sending HTTP requests -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Options is the structure of configurable options for Provider -type Options struct { - // Endpoint to retrieve credentials from. Required - Endpoint string - - // HTTPClient to handle sending HTTP requests to the target endpoint. - HTTPClient HTTPClient - - // Set of options to modify how the credentials operation is invoked. - APIOptions []func(*middleware.Stack) error - - // The Retryer to be used for determining whether a failed request should be retried - Retryer aws.Retryer - - // Optional authorization token value; if set, it will be used as the value of - // the Authorization header of the endpoint credential request. - // - // When constructed from environment, the provider will use the value of - // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token - // - // Will be overridden if AuthorizationTokenProvider is configured - AuthorizationToken string - - // Optional auth provider func to dynamically load the auth token from a file - // every time a credential is retrieved - // - // When constructed from environment, the provider will read and use the content - // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable - // as the auth token every time credentials are retrieved - // - // Will override AuthorizationToken if configured - AuthorizationTokenProvider AuthTokenProvider -}
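As an editorial aside (not part of this diff): the Options above leave three knobs in play, the endpoint, a static AuthorizationToken, and a dynamic AuthorizationTokenProvider. A small, hypothetical sketch of constructing and caching the provider via the New constructor shown just below; the URL and token values are placeholders, not anything the vendored code prescribes:

package main

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
)

func main() {
    // In ECS the endpoint is normally derived from
    // AWS_CONTAINER_CREDENTIALS_RELATIVE_URI; it is hard-coded here only
    // for illustration.
    provider := endpointcreds.New("http://127.0.0.1:8080/creds",
        func(o *endpointcreds.Options) {
            o.AuthorizationToken = "example-token" // placeholder value
        })

    // Wrap with aws.CredentialsCache for caching and concurrency safety.
    creds, err := aws.NewCredentialsCache(provider).Retrieve(context.TODO())
    if err != nil {
        fmt.Println("retrieve failed:", err)
        return
    }
    fmt.Println("can expire:", creds.CanExpire)
}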
-// AuthTokenProvider defines an interface to dynamically load a value to be passed -// for the Authorization header of a credentials request. -type AuthTokenProvider interface { - GetToken() (string, error) -} - -// TokenProviderFunc is a func type implementing the AuthTokenProvider interface -// and enables customizing token provider behavior -type TokenProviderFunc func() (string, error) - -// GetToken retrieves the auth token according to the TokenProviderFunc implementation -func (p TokenProviderFunc) GetToken() (string, error) { - return p() -} - -// New returns a credentials Provider for retrieving AWS credentials -// from an arbitrary endpoint. -func New(endpoint string, optFns ...func(*Options)) *Provider { - o := Options{ - Endpoint: endpoint, - } - - for _, fn := range optFns { - fn(&o) - } - - p := &Provider{ - client: client.New(client.Options{ - HTTPClient: o.HTTPClient, - Endpoint: o.Endpoint, - APIOptions: o.APIOptions, - Retryer: o.Retryer, - }), - options: o, - } - - return p -} - -// Retrieve will attempt to request the credentials from the endpoint the Provider -// was configured for. An error will be returned if the retrieval fails. -func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - resp, err := p.getCredentials(ctx) - if err != nil { - return aws.Credentials{}, fmt.Errorf("failed to load credentials, %w", err) - } - - creds := aws.Credentials{ - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.Token, - Source: ProviderName, - } - - if resp.Expiration != nil { - creds.CanExpire = true - creds.Expires = *resp.Expiration - } - - return creds, nil -} - -func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) { - authToken, err := p.resolveAuthToken() - if err != nil { - return nil, fmt.Errorf("resolve auth token: %v", err) - } - - return p.client.GetCredentials(ctx, &client.GetCredentialsInput{ - AuthorizationToken: authToken, - }) -} - -func (p *Provider) resolveAuthToken() (string, error) { - authToken := p.options.AuthorizationToken - - var err error - if p.options.AuthorizationTokenProvider != nil { - authToken, err = p.options.AuthorizationTokenProvider.GetToken() - if err != nil { - return "", err - } - } - - if strings.ContainsAny(authToken, "\r\n") { - return "", fmt.Errorf("authorization token contains invalid newline sequence") - } - - return authToken, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go deleted file mode 100644 index 2b4ff3895b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package credentials - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go deleted file mode 100644 index a3137b8fa9..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go +++ /dev/null @@ -1,92 +0,0 @@ -// Package processcreds is a credentials provider to retrieve credentials from an -// external CLI invoked process. -// -// WARNING: The following describes a method of sourcing credentials from an external -// process. This can potentially be dangerous, so proceed with caution. Other -// credential providers should be preferred if at all possible.
If using this -// option, you should make sure that the config file is as locked down as possible -// using security best practices for your operating system. -// -// # Concurrency and caching -// -// The Provider is not safe to be used concurrently, and does not provide any -// caching of credentials retrieved. You should wrap the Provider with an -// `aws.CredentialsCache` to provide concurrency safety, and caching of -// credentials. -// -// # Loading credentials with the SDK's AWS Config -// -// You can use credentials from an AWS shared config `credential_process` in a -// variety of ways. -// -// One way is to set up your shared config file, located in the default -// location, with the `credential_process` key and the command you want to be -// called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable -// (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. -// -// [default] -// credential_process = /command/to/call -// -// Loading the shared configuration will use the credential process to -// retrieve credentials. NOTE: If there are credentials in the profile you are -// using, the credential process will not be used. -// -// // Load the shared AWS configuration to resolve credentials. -// cfg, _ := config.LoadDefaultConfig(context.TODO()) -// -// // Create S3 service client to use the credentials. -// svc := s3.NewFromConfig(cfg) -// -// # Loading credentials with the Provider directly -// -// Another way to use the credentials process provider is by using the -// `NewProvider` constructor to create the provider and providing it with a -// command to be executed to retrieve credentials. -// -// The following example creates a credentials provider for a command, and wraps -// it with the CredentialsCache before assigning the provider to the Amazon S3 API -// client's Credentials option. -// -// // Create credentials using the Provider. -// provider := processcreds.NewProvider("/path/to/command") -// -// // Create the service client value configured for credentials. -// svc := s3.New(s3.Options{ -// Credentials: aws.NewCredentialsCache(provider), -// }) -// -// If you need more control, you can set any configurable options in the -// credentials using one or more option functions. -// -// provider := processcreds.NewProvider("/path/to/command", -// func(o *processcreds.Options) { -// // Override the provider's default timeout -// o.Timeout = 2 * time.Minute -// }) -// -// You can also use your own `exec.Cmd` value by providing a value that satisfies -// the `NewCommandBuilder` interface to the `NewProviderCommand` constructor.
-// -// // Create an exec.Cmd -// cmdBuilder := processcreds.NewCommandBuilderFunc( -// func(ctx context.Context) (*exec.Cmd, error) { -// cmd := exec.CommandContext(ctx, -// "customCLICommand", -// "-a", "argument", -// ) -// cmd.Env = []string{ -// "ENV_VAR_FOO=value", -// "ENV_VAR_BAR=other_value", -// } -// -// return cmd, nil -// }, -// ) -// -// // Create credentials using your exec.Cmd and custom timeout -// provider := processcreds.NewProviderCommand(cmdBuilder, -// func(o *processcreds.Options) { -// // optionally override the provider's default timeout -// o.Timeout = 1 * time.Second -// }) -package processcreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go deleted file mode 100644 index fe9345e287..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go +++ /dev/null @@ -1,281 +0,0 @@ -package processcreds - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "os" - "os/exec" - "runtime" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdkio" -) - -const ( - // ProviderName is the name this credentials provider will label any - // returned credentials Value with. - ProviderName = `ProcessProvider` - - // DefaultTimeout default limit on time a process can run. - DefaultTimeout = time.Duration(1) * time.Minute -) - -// ProviderError is an error indicating failure initializing or executing the -// process credentials provider -type ProviderError struct { - Err error -} - -// Error returns the error message. -func (e *ProviderError) Error() string { - return fmt.Sprintf("process provider error: %v", e.Err) -} - -// Unwrap returns the underlying error the provider error wraps. -func (e *ProviderError) Unwrap() error { - return e.Err -} - -// Provider satisfies the aws.CredentialsProvider interface, and is a -// client to retrieve credentials from a process. -type Provider struct { - // Provides a constructor for the exec.Cmd values that are invoked by the -- // provider for retrieving credentials. Use this to provide custom creation -- // of exec.Cmd with things like environment variables, or other configuration. - // - // The provider defaults to the DefaultNewCommand function. - commandBuilder NewCommandBuilder - - options Options -} - -// Options is the configuration options for configuring the Provider. -type Options struct { - // Timeout limits the time a process can run. - Timeout time.Duration -} - -// NewCommandBuilder provides the interface for specifying how the command will be -// created that the Provider will use to retrieve credentials with. -type NewCommandBuilder interface { - NewCommand(context.Context) (*exec.Cmd, error) -} - -// NewCommandBuilderFunc provides a wrapper type around a function pointer to -// satisfy the NewCommandBuilder interface. -type NewCommandBuilderFunc func(context.Context) (*exec.Cmd, error) - -// NewCommand calls the underlying function pointer the builder was initialized with. -func (fn NewCommandBuilderFunc) NewCommand(ctx context.Context) (*exec.Cmd, error) { - return fn(ctx) -} - -// DefaultNewCommandBuilder provides the default NewCommandBuilder -// implementation used by the provider. It takes a command and arguments to -// invoke. The command will also be initialized with the current process -// environment variables, stderr, and stdin pipes.
-type DefaultNewCommandBuilder struct { - Args []string -} - -// NewCommand returns an initialized exec.Cmd with the builder's initialized -// Args. The command is also initialized with the current process environment -// variables, stderr, and stdin pipes. -func (b DefaultNewCommandBuilder) NewCommand(ctx context.Context) (*exec.Cmd, error) { - var cmdArgs []string - if runtime.GOOS == "windows" { - cmdArgs = []string{"cmd.exe", "/C"} - } else { - cmdArgs = []string{"sh", "-c"} - } - - if len(b.Args) == 0 { - return nil, &ProviderError{ - Err: fmt.Errorf("failed to prepare command: command must not be empty"), - } - } - - cmdArgs = append(cmdArgs, b.Args...) - cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...) - cmd.Env = os.Environ() - - cmd.Stderr = os.Stderr // display stderr on console for MFA - cmd.Stdin = os.Stdin // enable stdin for MFA - - return cmd, nil -} - -// NewProvider returns a pointer to a new Provider value configured to invoke -// the given command. -// -// The provider defaults to the DefaultNewCommandBuilder for creating the command -// the Provider will use to retrieve credentials with. -func NewProvider(command string, options ...func(*Options)) *Provider { - var args []string - - // Ensure that the command arguments are not set if the provided command is - // empty. This will error out when the command is executed since no - // arguments are specified. - if len(command) > 0 { - args = []string{command} - } - - commandBuilder := DefaultNewCommandBuilder{ - Args: args, - } - return NewProviderCommand(commandBuilder, options...) -} - -// NewProviderCommand returns a pointer to a new Provider value with the -// specified command builder, and default timeout duration. Use this to provide custom -// creation of exec.Cmd for options like environment variables, or other -// configuration. -func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *Provider { - p := &Provider{ - commandBuilder: builder, - options: Options{ - Timeout: DefaultTimeout, - }, - } - - for _, option := range options { - option(&p.options) - } - - return p -} - -// A CredentialProcessResponse is the AWS credentials format that must be -// returned when executing an external credential_process. -type CredentialProcessResponse struct { - // As of this writing, the Version key must be set to 1. This might - // increment over time as the structure evolves. - Version int - - // The access key ID that identifies the temporary security credentials. - AccessKeyID string `json:"AccessKeyId"` - - // The secret access key that can be used to sign requests. - SecretAccessKey string - - // The token that users must pass to the service API to use the temporary credentials. - SessionToken string - - // The date on which the current credentials expire. - Expiration *time.Time -}
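As an editorial aside (not part of this diff): CredentialProcessResponse above is the contract an external credential_process must satisfy on stdout. A toy emitter, offered only to illustrate that JSON shape; every key value below is a placeholder:

package main

import (
    "encoding/json"
    "os"
    "time"
)

func main() {
    // Emit the Version 1 document that Retrieve (below) parses and
    // validates. Expiration is optional; when present it marks the
    // credentials as expiring.
    doc := map[string]interface{}{
        "Version":         1,
        "AccessKeyId":     "AKIDEXAMPLE",         // placeholder
        "SecretAccessKey": "secretExample",       // placeholder
        "SessionToken":    "sessionTokenExample", // placeholder
        "Expiration":      time.Now().Add(15 * time.Minute).UTC().Format(time.RFC3339),
    }
    if err := json.NewEncoder(os.Stdout).Encode(doc); err != nil {
        os.Exit(1)
    }
}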
-func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - out, err := p.executeCredentialProcess(ctx) - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - // Serialize and validate response - resp := &CredentialProcessResponse{} - if err = json.Unmarshal(out, resp); err != nil { - return aws.Credentials{Source: ProviderName}, &ProviderError{ - Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err), - } - } - - if resp.Version != 1 { - return aws.Credentials{Source: ProviderName}, &ProviderError{ - Err: fmt.Errorf("wrong version in process output (not 1)"), - } - } - - if len(resp.AccessKeyID) == 0 { - return aws.Credentials{Source: ProviderName}, &ProviderError{ - Err: fmt.Errorf("missing AccessKeyId in process output"), - } - } - - if len(resp.SecretAccessKey) == 0 { - return aws.Credentials{Source: ProviderName}, &ProviderError{ - Err: fmt.Errorf("missing SecretAccessKey in process output"), - } - } - - creds := aws.Credentials{ - Source: ProviderName, - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.SessionToken, - } - - // Handle expiration - if resp.Expiration != nil { - creds.CanExpire = true - creds.Expires = *resp.Expiration - } - - return creds, nil -} - -// executeCredentialProcess starts the credential process on the OS and -// returns the results or an error. -func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) { - if p.options.Timeout >= 0 { - var cancelFunc func() - ctx, cancelFunc = context.WithTimeout(ctx, p.options.Timeout) - defer cancelFunc() - } - - cmd, err := p.commandBuilder.NewCommand(ctx) - if err != nil { - return nil, err - } - - // get creds json on process's stdout - output := bytes.NewBuffer(make([]byte, 0, int(8*sdkio.KibiByte))) - if cmd.Stdout != nil { - cmd.Stdout = io.MultiWriter(cmd.Stdout, output) - } else { - cmd.Stdout = output - } - - execCh := make(chan error, 1) - go executeCommand(cmd, execCh) - - select { - case execError := <-execCh: - if execError == nil { - break - } - select { - case <-ctx.Done(): - return output.Bytes(), &ProviderError{ - Err: fmt.Errorf("credential process timed out: %w", execError), - } - default: - return output.Bytes(), &ProviderError{ - Err: fmt.Errorf("error in credential_process: %w", execError), - } - } - } - - out := output.Bytes() - if runtime.GOOS == "windows" { - // windows adds slashes to quotes - out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`)) - } - - return out, nil -} - -func executeCommand(cmd *exec.Cmd, exec chan error) { - // Start the command - err := cmd.Start() - if err == nil { - err = cmd.Wait() - } - - exec <- err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go deleted file mode 100644 index ece1e65f73..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go +++ /dev/null @@ -1,81 +0,0 @@ -// Package ssocreds provides a credential provider for retrieving temporary AWS -// credentials using an SSO access token. -// -// IMPORTANT: The provider in this package does not initiate or perform the AWS -// SSO login flow. The SDK provider expects that you have already performed the -// SSO login flow using AWS CLI using the "aws sso login" command, or by some -// other mechanism. The provider must find a valid non-expired access token for -// the AWS SSO user portal URL in ~/.aws/sso/cache. 
If a cached token is not -// found, it is expired, or the file is malformed an error will be returned. -// -// # Loading AWS SSO credentials with the AWS shared configuration file -// -// You can use configure AWS SSO credentials from the AWS shared configuration file by -// specifying the required keys in the profile and referencing an sso-session: -// -// sso_session -// sso_account_id -// sso_role_name -// -// For example, the following defines a profile "devsso" and specifies the AWS -// SSO parameters that defines the target account, role, sign-on portal, and -// the region where the user portal is located. Note: all SSO arguments must be -// provided, or an error will be returned. -// -// [profile devsso] -// sso_session = dev-session -// sso_role_name = SSOReadOnlyRole -// sso_account_id = 123456789012 -// -// [sso-session dev-session] -// sso_start_url = https://my-sso-portal.awsapps.com/start -// sso_region = us-east-1 -// sso_registration_scopes = sso:account:access -// -// Using the config module, you can load the AWS SDK shared configuration, and -// specify that this profile be used to retrieve credentials. For example: -// -// config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso")) -// if err != nil { -// return err -// } -// -// # Programmatically loading AWS SSO credentials directly -// -// You can programmatically construct the AWS SSO Provider in your application, -// and provide the necessary information to load and retrieve temporary -// credentials using an access token from ~/.aws/sso/cache. -// -// ssoClient := sso.NewFromConfig(cfg) -// ssoOidcClient := ssooidc.NewFromConfig(cfg) -// tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session") -// if err != nil { -// return err -// } -// -// var provider aws.CredentialsProvider -// provider = ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start", func(options *ssocreds.Options) { -// options.SSOTokenProvider = ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath) -// }) -// -// // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time -// provider = aws.NewCredentialsCache(provider) -// -// credentials, err := provider.Retrieve(context.TODO()) -// if err != nil { -// return err -// } -// -// It is important that you wrap the Provider with aws.CredentialsCache if you -// are programmatically constructing the provider directly. This prevents your -// application from accessing the cached access token and requesting new -// credentials each time the credentials are used. 
-// -// # Additional Resources -// -// Configuring the AWS CLI to use AWS Single Sign-On: -// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html -// -// AWS Single Sign-On User Guide: -// https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html -package ssocreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go deleted file mode 100644 index 3b97e6dd40..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go +++ /dev/null @@ -1,233 +0,0 @@ -package ssocreds - -import ( - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/internal/shareddefaults" -) - -var osUserHomeDur = shareddefaults.UserHomeDir - -// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or -// error if unable get derive the path. Key that will be used to compute a SHA1 -// value that is hex encoded. -// -// Derives the filepath using the Key as: -// -// ~/.aws/sso/cache/.json -func StandardCachedTokenFilepath(key string) (string, error) { - homeDir := osUserHomeDur() - if len(homeDir) == 0 { - return "", fmt.Errorf("unable to get USER's home directory for cached token") - } - hash := sha1.New() - if _, err := hash.Write([]byte(key)); err != nil { - return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %w", err) - } - - cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json" - - return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil -} - -type tokenKnownFields struct { - AccessToken string `json:"accessToken,omitempty"` - ExpiresAt *rfc3339 `json:"expiresAt,omitempty"` - - RefreshToken string `json:"refreshToken,omitempty"` - ClientID string `json:"clientId,omitempty"` - ClientSecret string `json:"clientSecret,omitempty"` -} - -type token struct { - tokenKnownFields - UnknownFields map[string]interface{} `json:"-"` -} - -func (t token) MarshalJSON() ([]byte, error) { - fields := map[string]interface{}{} - - setTokenFieldString(fields, "accessToken", t.AccessToken) - setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt) - - setTokenFieldString(fields, "refreshToken", t.RefreshToken) - setTokenFieldString(fields, "clientId", t.ClientID) - setTokenFieldString(fields, "clientSecret", t.ClientSecret) - - for k, v := range t.UnknownFields { - if _, ok := fields[k]; ok { - return nil, fmt.Errorf("unknown token field %v, duplicates known field", k) - } - fields[k] = v - } - - return json.Marshal(fields) -} - -func setTokenFieldString(fields map[string]interface{}, key, value string) { - if value == "" { - return - } - fields[key] = value -} -func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) { - if value == nil { - return - } - fields[key] = value -} - -func (t *token) UnmarshalJSON(b []byte) error { - var fields map[string]interface{} - if err := json.Unmarshal(b, &fields); err != nil { - return nil - } - - t.UnknownFields = map[string]interface{}{} - - for k, v := range fields { - var err error - switch k { - case "accessToken": - err = getTokenFieldString(v, &t.AccessToken) - case "expiresAt": - err = getTokenFieldRFC3339(v, &t.ExpiresAt) - case "refreshToken": - err = getTokenFieldString(v, &t.RefreshToken) - case 
"clientId": - err = getTokenFieldString(v, &t.ClientID) - case "clientSecret": - err = getTokenFieldString(v, &t.ClientSecret) - default: - t.UnknownFields[k] = v - } - - if err != nil { - return fmt.Errorf("field %q, %w", k, err) - } - } - - return nil -} - -func getTokenFieldString(v interface{}, value *string) error { - var ok bool - *value, ok = v.(string) - if !ok { - return fmt.Errorf("expect value to be string, got %T", v) - } - return nil -} - -func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { - var stringValue string - if err := getTokenFieldString(v, &stringValue); err != nil { - return err - } - - timeValue, err := parseRFC3339(stringValue) - if err != nil { - return err - } - - *value = &timeValue - return nil -} - -func loadCachedToken(filename string) (token, error) { - fileBytes, err := ioutil.ReadFile(filename) - if err != nil { - return token{}, fmt.Errorf("failed to read cached SSO token file, %w", err) - } - - var t token - if err := json.Unmarshal(fileBytes, &t); err != nil { - return token{}, fmt.Errorf("failed to parse cached SSO token file, %w", err) - } - - if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() { - return token{}, fmt.Errorf( - "cached SSO token must contain accessToken and expiresAt fields") - } - - return t, nil -} - -func storeCachedToken(filename string, t token, fileMode os.FileMode) (err error) { - tmpFilename := filename + ".tmp-" + strconv.FormatInt(sdk.NowTime().UnixNano(), 10) - if err := writeCacheFile(tmpFilename, fileMode, t); err != nil { - return err - } - - if err := os.Rename(tmpFilename, filename); err != nil { - return fmt.Errorf("failed to replace old cached SSO token file, %w", err) - } - - return nil -} - -func writeCacheFile(filename string, fileMode os.FileMode, t token) (err error) { - var f *os.File - f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) - if err != nil { - return fmt.Errorf("failed to create cached SSO token file %w", err) - } - - defer func() { - closeErr := f.Close() - if err == nil && closeErr != nil { - err = fmt.Errorf("failed to close cached SSO token file, %w", closeErr) - } - }() - - encoder := json.NewEncoder(f) - - if err = encoder.Encode(t); err != nil { - return fmt.Errorf("failed to serialize cached SSO token, %w", err) - } - - return nil -} - -type rfc3339 time.Time - -func parseRFC3339(v string) (rfc3339, error) { - parsed, err := time.Parse(time.RFC3339, v) - if err != nil { - return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %w", err) - } - - return rfc3339(parsed), nil -} - -func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) { - var value string - - // Use JSON unmarshal to unescape the quoted value making use of JSON's - // unquoting rules. - if err = json.Unmarshal(bytes, &value); err != nil { - return err - } - - *r, err = parseRFC3339(value) - - return nil -} - -func (r *rfc3339) MarshalJSON() ([]byte, error) { - value := time.Time(*r).Format(time.RFC3339) - - // Use JSON unmarshal to unescape the quoted value making use of JSON's - // quoting rules. 
- return json.Marshal(value) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go deleted file mode 100644 index b3cf7853e7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go +++ /dev/null @@ -1,152 +0,0 @@ -package ssocreds - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/service/sso" -) - -// ProviderName is the name of the provider used to specify the source of -// credentials. -const ProviderName = "SSOProvider" - -// GetRoleCredentialsAPIClient is a API client that implements the -// GetRoleCredentials operation. -type GetRoleCredentialsAPIClient interface { - GetRoleCredentials(context.Context, *sso.GetRoleCredentialsInput, ...func(*sso.Options)) ( - *sso.GetRoleCredentialsOutput, error, - ) -} - -// Options is the Provider options structure. -type Options struct { - // The Client which is configured for the AWS Region where the AWS SSO user - // portal is located. - Client GetRoleCredentialsAPIClient - - // The AWS account that is assigned to the user. - AccountID string - - // The role name that is assigned to the user. - RoleName string - - // The URL that points to the organization's AWS Single Sign-On (AWS SSO) - // user portal. - StartURL string - - // The filepath the cached token will be retrieved from. If unset Provider will - // use the startURL to determine the filepath at. - // - // ~/.aws/sso/cache/.json - // - // If custom cached token filepath is used, the Provider's startUrl - // parameter will be ignored. - CachedTokenFilepath string - - // Used by the SSOCredentialProvider if a token configuration - // profile is used in the shared config - SSOTokenProvider *SSOTokenProvider -} - -// Provider is an AWS credential provider that retrieves temporary AWS -// credentials by exchanging an SSO login token. -type Provider struct { - options Options - - cachedTokenFilepath string -} - -// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The -// provided client is expected to be configured for the AWS Region where the -// AWS SSO user portal is located. -func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider { - options := Options{ - Client: client, - AccountID: accountID, - RoleName: roleName, - StartURL: startURL, - } - - for _, fn := range optFns { - fn(&options) - } - - return &Provider{ - options: options, - cachedTokenFilepath: options.CachedTokenFilepath, - } -} - -// Retrieve retrieves temporary AWS credentials from the configured Amazon -// Single Sign-On (AWS SSO) user portal by exchanging the accessToken present -// in ~/.aws/sso/cache. However, if a token provider configuration exists -// in the shared config, then we ought to use the token provider rather then -// direct access on the cached token. 
-func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - var accessToken *string - if p.options.SSOTokenProvider != nil { - token, err := p.options.SSOTokenProvider.RetrieveBearerToken(ctx) - if err != nil { - return aws.Credentials{}, err - } - accessToken = &token.Value - } else { - if p.cachedTokenFilepath == "" { - cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL) - if err != nil { - return aws.Credentials{}, &InvalidTokenError{Err: err} - } - p.cachedTokenFilepath = cachedTokenFilepath - } - - tokenFile, err := loadCachedToken(p.cachedTokenFilepath) - if err != nil { - return aws.Credentials{}, &InvalidTokenError{Err: err} - } - - if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) { - return aws.Credentials{}, &InvalidTokenError{} - } - accessToken = &tokenFile.AccessToken - } - - output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{ - AccessToken: accessToken, - AccountId: &p.options.AccountID, - RoleName: &p.options.RoleName, - }) - if err != nil { - return aws.Credentials{}, err - } - - return aws.Credentials{ - AccessKeyID: aws.ToString(output.RoleCredentials.AccessKeyId), - SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey), - SessionToken: aws.ToString(output.RoleCredentials.SessionToken), - CanExpire: true, - Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(), - Source: ProviderName, - }, nil -} - -// InvalidTokenError is the error type that is returned if loaded token has -// expired or is otherwise invalid. To refresh the SSO session run AWS SSO -// login with the corresponding profile. -type InvalidTokenError struct { - Err error -} - -func (i *InvalidTokenError) Unwrap() error { - return i.Err -} - -func (i *InvalidTokenError) Error() string { - const msg = "the SSO session has expired or is invalid" - if i.Err == nil { - return msg - } - return msg + ": " + i.Err.Error() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go deleted file mode 100644 index 7f4fc54677..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go +++ /dev/null @@ -1,147 +0,0 @@ -package ssocreds - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/service/ssooidc" - "github.com/aws/smithy-go/auth/bearer" -) - -// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API -// client for calling CreateToken operation to refresh the SSO token. -type CreateTokenAPIClient interface { - CreateToken(context.Context, *ssooidc.CreateTokenInput, ...func(*ssooidc.Options)) ( - *ssooidc.CreateTokenOutput, error, - ) -} - -// SSOTokenProviderOptions provides the options for configuring the -// SSOTokenProvider. -type SSOTokenProviderOptions struct { - // Client that can be overridden - Client CreateTokenAPIClient - - // The set of API Client options to be applied when invoking the - // CreateToken operation. - ClientOptions []func(*ssooidc.Options) - - // The path the file containing the cached SSO token will be read from. - // Initialized the NewSSOTokenProvider's cachedTokenFilepath parameter. 
- CachedTokenFilepath string -} - -// SSOTokenProvider provides an utility for refreshing SSO AccessTokens for -// Bearer Authentication. The SSOTokenProvider can only be used to refresh -// already cached SSO Tokens. This utility cannot perform the initial SSO -// create token. -// -// The SSOTokenProvider is not safe to use concurrently. It must be wrapped in -// a utility such as smithy-go's auth/bearer#TokenCache. The SDK's -// config.LoadDefaultConfig will automatically wrap the SSOTokenProvider with -// the smithy-go TokenCache, if the external configuration loaded configured -// for an SSO session. -// -// The initial SSO create token should be preformed with the AWS CLI before the -// Go application using the SSOTokenProvider will need to retrieve the SSO -// token. If the AWS CLI has not created the token cache file, this provider -// will return an error when attempting to retrieve the cached token. -// -// This provider will attempt to refresh the cached SSO token periodically if -// needed when RetrieveBearerToken is called. -// -// A utility such as the AWS CLI must be used to initially create the SSO -// session and cached token file. -// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html -type SSOTokenProvider struct { - options SSOTokenProviderOptions -} - -var _ bearer.TokenProvider = (*SSOTokenProvider)(nil) - -// NewSSOTokenProvider returns an initialized SSOTokenProvider that will -// periodically refresh the SSO token cached stored in the cachedTokenFilepath. -// The cachedTokenFilepath file's content will be rewritten by the token -// provider when the token is refreshed. -// -// The client must be configured for the AWS region the SSO token was created for. -func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider { - options := SSOTokenProviderOptions{ - Client: client, - CachedTokenFilepath: cachedTokenFilepath, - } - for _, fn := range optFns { - fn(&options) - } - - provider := &SSOTokenProvider{ - options: options, - } - - return provider -} - -// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath -// the SSOTokenProvider was created with. If the token has expired -// RetrieveBearerToken will attempt to refresh it. If the token cannot be -// refreshed or is not present an error will be returned. -// -// A utility such as the AWS CLI must be used to initially create the SSO -// session and cached token file. 
https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html -func (p SSOTokenProvider) RetrieveBearerToken(ctx context.Context) (bearer.Token, error) { - cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath) - if err != nil { - return bearer.Token{}, err - } - - if cachedToken.ExpiresAt != nil && sdk.NowTime().After(time.Time(*cachedToken.ExpiresAt)) { - cachedToken, err = p.refreshToken(ctx, cachedToken) - if err != nil { - return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %w", err) - } - } - - expiresAt := aws.ToTime((*time.Time)(cachedToken.ExpiresAt)) - return bearer.Token{ - Value: cachedToken.AccessToken, - CanExpire: !expiresAt.IsZero(), - Expires: expiresAt, - }, nil -} - -func (p SSOTokenProvider) refreshToken(ctx context.Context, cachedToken token) (token, error) { - if cachedToken.ClientSecret == "" || cachedToken.ClientID == "" || cachedToken.RefreshToken == "" { - return token{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed") - } - - createResult, err := p.options.Client.CreateToken(ctx, &ssooidc.CreateTokenInput{ - ClientId: &cachedToken.ClientID, - ClientSecret: &cachedToken.ClientSecret, - RefreshToken: &cachedToken.RefreshToken, - GrantType: aws.String("refresh_token"), - }, p.options.ClientOptions...) - if err != nil { - return token{}, fmt.Errorf("unable to refresh SSO token, %w", err) - } - - expiresAt := sdk.NowTime().Add(time.Duration(createResult.ExpiresIn) * time.Second) - - cachedToken.AccessToken = aws.ToString(createResult.AccessToken) - cachedToken.ExpiresAt = (*rfc3339)(&expiresAt) - cachedToken.RefreshToken = aws.ToString(createResult.RefreshToken) - - fileInfo, err := os.Stat(p.options.CachedTokenFilepath) - if err != nil { - return token{}, fmt.Errorf("failed to stat cached SSO token file %w", err) - } - - if err = storeCachedToken(p.options.CachedTokenFilepath, cachedToken, fileInfo.Mode()); err != nil { - return token{}, fmt.Errorf("unable to cache refreshed SSO token, %w", err) - } - - return cachedToken, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go deleted file mode 100644 index d525cac096..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go +++ /dev/null @@ -1,53 +0,0 @@ -package credentials - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -const ( - // StaticCredentialsName provides a name of Static provider - StaticCredentialsName = "StaticCredentials" -) - -// StaticCredentialsEmptyError is emitted when static credentials are empty. -type StaticCredentialsEmptyError struct{} - -func (*StaticCredentialsEmptyError) Error() string { - return "static credentials are empty" -} - -// A StaticCredentialsProvider is a set of credentials which are set, and will -// never expire. -type StaticCredentialsProvider struct { - Value aws.Credentials -} - -// NewStaticCredentialsProvider return a StaticCredentialsProvider initialized with the AWS -// credentials passed in. -func NewStaticCredentialsProvider(key, secret, session string) StaticCredentialsProvider { - return StaticCredentialsProvider{ - Value: aws.Credentials{ - AccessKeyID: key, - SecretAccessKey: secret, - SessionToken: session, - }, - } -} - -// Retrieve returns the credentials or error if the credentials are invalid. 
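The static provider below is the simplest of these credential sources; a short usage sketch, again with placeholder (not real) key material:

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
)

func main() {
	// NewStaticCredentialsProvider returns credentials that never expire;
	// Retrieve fails with StaticCredentialsEmptyError if key or secret is empty.
	provider := credentials.NewStaticCredentialsProvider("AKIDEXAMPLE", "fake-secret-key", "")
	cfg := aws.Config{
		Region:      "us-east-1",
		Credentials: provider,
	}
	_ = cfg // pass cfg to any service client's NewFromConfig
}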
-func (s StaticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) { - v := s.Value - if v.AccessKeyID == "" || v.SecretAccessKey == "" { - return aws.Credentials{ - Source: StaticCredentialsName, - }, &StaticCredentialsEmptyError{} - } - - if len(v.Source) == 0 { - v.Source = StaticCredentialsName - } - - return v, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go deleted file mode 100644 index 289707b6de..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go +++ /dev/null @@ -1,320 +0,0 @@ -// Package stscreds are credential Providers to retrieve STS AWS credentials. -// -// STS provides multiple ways to retrieve credentials which can be used when making -// future AWS service API operation calls. -// -// The SDK will ensure that per instance of credentials.Credentials all requests -// to refresh the credentials will be synchronized. But, the SDK is unable to -// ensure synchronous usage of the AssumeRoleProvider if the value is shared -// between multiple Credentials or service clients. -// -// # Assume Role -// -// To assume an IAM role using STS with the SDK you can create a new Credentials -// with the SDKs's stscreds package. -// -// // Initial credentials loaded from SDK's default credential chain. Such as -// // the environment, shared credentials (~/.aws/credentials), or EC2 Instance -// // Role. These credentials will be used to to make the STS Assume Role API. -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// // Create the credentials from AssumeRoleProvider to assume the role -// // referenced by the "myRoleARN" ARN. -// stsSvc := sts.NewFromConfig(cfg) -// creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn") -// -// cfg.Credentials = aws.NewCredentialsCache(creds) -// -// // Create service client value configured for credentials -// // from assumed role. -// svc := s3.NewFromConfig(cfg) -// -// # Assume Role with custom MFA Token provider -// -// To assume an IAM role with a MFA token you can either specify a custom MFA -// token provider or use the SDK's built in StdinTokenProvider that will prompt -// the user for a token code each time the credentials need to to be refreshed. -// Specifying a custom token provider allows you to control where the token -// code is retrieved from, and how it is refreshed. -// -// With a custom token provider, the provider is responsible for refreshing the -// token code when called. -// -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// staticTokenProvider := func() (string, error) { -// return someTokenCode, nil -// } -// -// // Create the credentials from AssumeRoleProvider to assume the role -// // referenced by the "myRoleARN" ARN using the MFA token code provided. -// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) { -// o.SerialNumber = aws.String("myTokenSerialNumber") -// o.TokenProvider = staticTokenProvider -// }) -// -// cfg.Credentials = aws.NewCredentialsCache(creds) -// -// // Create service client value configured for credentials -// // from assumed role. 
-// svc := s3.NewFromConfig(cfg) -// -// # Assume Role with MFA Token Provider -// -// To assume an IAM role with MFA for longer running tasks where the credentials -// may need to be refreshed setting the TokenProvider field of AssumeRoleProvider -// will allow the credential provider to prompt for new MFA token code when the -// role's credentials need to be refreshed. -// -// The StdinTokenProvider function is available to prompt on stdin to retrieve -// the MFA token code from the user. You can also implement custom prompts by -// satisfying the TokenProvider function signature. -// -// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -// have undesirable results as the StdinTokenProvider will not be synchronized. A -// single Credentials with an AssumeRoleProvider can be shared safely. -// -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// // Create the credentials from AssumeRoleProvider to assume the role -// // referenced by the "myRoleARN" ARN using the MFA token code provided. -// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) { -// o.SerialNumber = aws.String("myTokenSerialNumber") -// o.TokenProvider = stscreds.StdinTokenProvider -// }) -// -// cfg.Credentials = aws.NewCredentialsCache(creds) -// -// // Create service client value configured for credentials -// // from assumed role. -// svc := s3.NewFromConfig(cfg) -package stscreds - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/sts" - "github.com/aws/aws-sdk-go-v2/service/sts/types" -) - -// StdinTokenProvider will prompt on stdout and read from stdin for a string value. -// An error is returned if reading from stdin fails. -// -// Use this function go read MFA tokens from stdin. The function makes no attempt -// to make atomic prompts from stdin across multiple gorouties. -// -// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -// have undesirable results as the StdinTokenProvider will not be synchronized. A -// single Credentials with an AssumeRoleProvider can be shared safely -// -// Will wait forever until something is provided on the stdin. -func StdinTokenProvider() (string, error) { - var v string - fmt.Printf("Assume Role MFA token code: ") - _, err := fmt.Scanln(&v) - - return v, err -} - -// ProviderName provides a name of AssumeRole provider -const ProviderName = "AssumeRoleProvider" - -// AssumeRoleAPIClient is a client capable of the STS AssumeRole operation. -type AssumeRoleAPIClient interface { - AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error) -} - -// DefaultDuration is the default amount of time in minutes that the -// credentials will be valid for. This value is only used by AssumeRoleProvider -// for specifying the default expiry duration of an assume role. -// -// Other providers such as WebIdentityRoleProvider do not use this value, and -// instead rely on STS API's default parameter handing to assign a default -// value. -var DefaultDuration = time.Duration(15) * time.Minute - -// AssumeRoleProvider retrieves temporary credentials from the STS service, and -// keeps track of their expiration time. 
-// -// This credential provider will be used by the SDKs default credential change -// when shared configuration is enabled, and the shared config or shared credentials -// file configure assume role. See Session docs for how to do this. -// -// AssumeRoleProvider does not provide any synchronization and it is not safe -// to share this value across multiple Credentials, Sessions, or service clients -// without also sharing the same Credentials instance. -type AssumeRoleProvider struct { - options AssumeRoleOptions -} - -// AssumeRoleOptions is the configurable options for AssumeRoleProvider -type AssumeRoleOptions struct { - // Client implementation of the AssumeRole operation. Required - Client AssumeRoleAPIClient - - // IAM Role ARN to be assumed. Required - RoleARN string - - // Session name, if you wish to uniquely identify this session. - RoleSessionName string - - // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // Optional ExternalID to pass along, defaults to nil if not set. - ExternalID *string - - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string - - // The ARNs of IAM managed policies you want to use as managed session policies. - // The policies must exist in the same account as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyARNs []types.PolicyDescriptorType - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - SerialNumber *string - - // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. 
You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. - SourceIdentity *string - - // Async method of providing MFA token code for assuming an IAM role with MFA. - // The value returned by the function will be used as the TokenCode in the Retrieve - // call. See StdinTokenProvider for a provider that prompts and reads from stdin. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed when SerialNumber is set. - TokenProvider func() (string, error) - - // A list of session tags that you want to pass. Each session tag consists of a key - // name and an associated value. For more information about session tags, see - // Tagging STS Sessions - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the - // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. - Tags []types.Tag - - // A list of keys for session tags that you want to set as transitive. If you set a - // tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. This parameter is optional. - TransitiveTagKeys []string -} - -// NewAssumeRoleProvider constructs and returns a credentials provider that -// will retrieve credentials by assuming a IAM role using STS. -func NewAssumeRoleProvider(client AssumeRoleAPIClient, roleARN string, optFns ...func(*AssumeRoleOptions)) *AssumeRoleProvider { - o := AssumeRoleOptions{ - Client: client, - RoleARN: roleARN, - } - - for _, fn := range optFns { - fn(&o) - } - - return &AssumeRoleProvider{ - options: o, - } -} - -// Retrieve generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) { - // Apply defaults where parameters are not set. - if len(p.options.RoleSessionName) == 0 { - // Try to work out a role name that will hopefully end up unique. - p.options.RoleSessionName = fmt.Sprintf("aws-go-sdk-%d", time.Now().UTC().UnixNano()) - } - if p.options.Duration == 0 { - // Expire as often as AWS permits. 
- p.options.Duration = DefaultDuration - } - input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int32(int32(p.options.Duration / time.Second)), - PolicyArns: p.options.PolicyARNs, - RoleArn: aws.String(p.options.RoleARN), - RoleSessionName: aws.String(p.options.RoleSessionName), - ExternalId: p.options.ExternalID, - SourceIdentity: p.options.SourceIdentity, - Tags: p.options.Tags, - TransitiveTagKeys: p.options.TransitiveTagKeys, - } - if p.options.Policy != nil { - input.Policy = p.options.Policy - } - if p.options.SerialNumber != nil { - if p.options.TokenProvider != nil { - input.SerialNumber = p.options.SerialNumber - code, err := p.options.TokenProvider() - if err != nil { - return aws.Credentials{}, err - } - input.TokenCode = aws.String(code) - } else { - return aws.Credentials{}, fmt.Errorf("assume role with MFA enabled, but TokenProvider is not set") - } - } - - resp, err := p.options.Client.AssumeRole(ctx, input) - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - return aws.Credentials{ - AccessKeyID: *resp.Credentials.AccessKeyId, - SecretAccessKey: *resp.Credentials.SecretAccessKey, - SessionToken: *resp.Credentials.SessionToken, - Source: ProviderName, - - CanExpire: true, - Expires: *resp.Credentials.Expiration, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go deleted file mode 100644 index ddaf6df6ce..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go +++ /dev/null @@ -1,150 +0,0 @@ -package stscreds - -import ( - "context" - "fmt" - "io/ioutil" - "strconv" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/service/sts" - "github.com/aws/aws-sdk-go-v2/service/sts/types" -) - -var invalidIdentityTokenExceptionCode = (&types.InvalidIdentityTokenException{}).ErrorCode() - -const ( - // WebIdentityProviderName is the web identity provider name - WebIdentityProviderName = "WebIdentityCredentials" -) - -// AssumeRoleWithWebIdentityAPIClient is a client capable of the STS AssumeRoleWithWebIdentity operation. -type AssumeRoleWithWebIdentityAPIClient interface { - AssumeRoleWithWebIdentity(ctx context.Context, params *sts.AssumeRoleWithWebIdentityInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error) -} - -// WebIdentityRoleProvider is used to retrieve credentials using -// an OIDC token. -type WebIdentityRoleProvider struct { - options WebIdentityRoleOptions -} - -// WebIdentityRoleOptions is a structure of configurable options for WebIdentityRoleProvider -type WebIdentityRoleOptions struct { - // Client implementation of the AssumeRoleWithWebIdentity operation. Required - Client AssumeRoleWithWebIdentityAPIClient - - // JWT Token Provider. Required - TokenRetriever IdentityTokenRetriever - - // IAM Role ARN to assume. Required - RoleARN string - - // Session name, if you wish to uniquely identify this session. - RoleSessionName string - - // Expiry duration of the STS credentials. STS will assign a default expiry - // duration if this value is unset. This is different from the Duration - // option of AssumeRoleProvider, which automatically assigns 15 minutes if - // Duration is unset. 
- // - // See the STS AssumeRoleWithWebIdentity API reference guide for more - // information on defaults. - // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html - Duration time.Duration - - // An IAM policy in JSON format that you want to use as an inline session policy. - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you - // want to use as managed session policies. The policies must exist in the - // same account as the role. - PolicyARNs []types.PolicyDescriptorType -} - -// IdentityTokenRetriever is an interface for retrieving a JWT -type IdentityTokenRetriever interface { - GetIdentityToken() ([]byte, error) -} - -// IdentityTokenFile is for retrieving an identity token from the given file name -type IdentityTokenFile string - -// GetIdentityToken retrieves the JWT token from the file and returns the contents as a []byte -func (j IdentityTokenFile) GetIdentityToken() ([]byte, error) { - b, err := ioutil.ReadFile(string(j)) - if err != nil { - return nil, fmt.Errorf("unable to read file at %s: %v", string(j), err) - } - - return b, nil -} - -// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the -// provided stsiface.ClientAPI -func NewWebIdentityRoleProvider(client AssumeRoleWithWebIdentityAPIClient, roleARN string, tokenRetriever IdentityTokenRetriever, optFns ...func(*WebIdentityRoleOptions)) *WebIdentityRoleProvider { - o := WebIdentityRoleOptions{ - Client: client, - RoleARN: roleARN, - TokenRetriever: tokenRetriever, - } - - for _, fn := range optFns { - fn(&o) - } - - return &WebIdentityRoleProvider{options: o} -} - -// Retrieve will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. -func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) { - b, err := p.options.TokenRetriever.GetIdentityToken() - if err != nil { - return aws.Credentials{}, fmt.Errorf("failed to retrieve jwt from provide source, %w", err) - } - - sessionName := p.options.RoleSessionName - if len(sessionName) == 0 { - // session name is used to uniquely identify a session. This simply - // uses unix time in nanoseconds to uniquely identify sessions. - sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10) - } - input := &sts.AssumeRoleWithWebIdentityInput{ - PolicyArns: p.options.PolicyARNs, - RoleArn: &p.options.RoleARN, - RoleSessionName: &sessionName, - WebIdentityToken: aws.String(string(b)), - } - if p.options.Duration != 0 { - // If set use the value, otherwise STS will assign a default expiration duration. - input.DurationSeconds = aws.Int32(int32(p.options.Duration / time.Second)) - } - if p.options.Policy != nil { - input.Policy = p.options.Policy - } - - resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, input, func(options *sts.Options) { - options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode) - }) - if err != nil { - return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err) - } - - // InvalidIdentityToken error is a temporary error that can occur - // when assuming an Role with a JWT web identity token. 
- - value := aws.Credentials{ - AccessKeyID: aws.ToString(resp.Credentials.AccessKeyId), - SecretAccessKey: aws.ToString(resp.Credentials.SecretAccessKey), - SessionToken: aws.ToString(resp.Credentials.SessionToken), - Source: WebIdentityProviderName, - CanExpire: true, - Expires: *resp.Credentials.Expiration, - } - return value, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md deleted file mode 100644 index e07fb5ca70..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ /dev/null @@ -1,311 +0,0 @@ -# v1.16.0 (2024-03-21) - -* **Feature**: Add config switch `DisableDefaultTimeout` that allows you to disable the default operation timeout (5 seconds) for IMDS calls. - -# v1.15.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.3 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.11 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.10 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.9 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.8 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.7 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.6 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.5 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.4 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.3 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.2 (2023-11-02) - -* No change notes available for this release. - -# v1.14.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.13 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.12 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.11 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.10 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.5 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2023-03-14) - -* **Feature**: Add flag to disable IMDSv1 fallback - -# v1.12.24 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.23 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.22 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.21 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.20 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.19 (2022-10-24) - -* **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.18 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.17 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.16 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.15 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.14 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-11-06) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-10-11) - -* **Feature**: Respect passed in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed in Context's Deadline or Timeout options. If an Client operation is called with a Context with a Deadline or Timeout, the client will no longer override it with the client's default timeout. -* **Bug Fix**: Fix IMDS client's response handling and operation timeout race. 
Fixes #1253 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-08-04) - -* **Feature**: adds error handling for defered close calls -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-07-15) - -* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go deleted file mode 100644 index 3f4a10e2c1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go +++ /dev/null @@ -1,352 +0,0 @@ -package imds - -import ( - "context" - "fmt" - "net" - "net/http" - "os" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalconfig "github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ServiceID provides the unique name of this API client -const ServiceID = "ec2imds" - -// Client provides the API client for interacting with the Amazon EC2 Instance -// Metadata Service API. -type Client struct { - options Options -} - -// ClientEnableState provides an enumeration if the client is enabled, -// disabled, or default behavior. -type ClientEnableState = internalconfig.ClientEnableState - -// Enumeration values for ClientEnableState -const ( - ClientDefaultEnableState ClientEnableState = internalconfig.ClientDefaultEnableState // default behavior - ClientDisabled ClientEnableState = internalconfig.ClientDisabled // client disabled - ClientEnabled ClientEnableState = internalconfig.ClientEnabled // client enabled -) - -// EndpointModeState is an enum configuration variable describing the client endpoint mode. -// Not configurable directly, but used when using the NewFromConfig. -type EndpointModeState = internalconfig.EndpointModeState - -// Enumeration values for EndpointModeState -const ( - EndpointModeStateUnset EndpointModeState = internalconfig.EndpointModeStateUnset - EndpointModeStateIPv4 EndpointModeState = internalconfig.EndpointModeStateIPv4 - EndpointModeStateIPv6 EndpointModeState = internalconfig.EndpointModeStateIPv6 -) - -const ( - disableClientEnvVar = "AWS_EC2_METADATA_DISABLED" - - // Client endpoint options - endpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" - - defaultIPv4Endpoint = "http://169.254.169.254" - defaultIPv6Endpoint = "http://[fd00:ec2::254]" -) - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. 
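A minimal usage sketch of the New constructor documented above (not part of this diff); the endpoint shown is the package's documented default IPv4 endpoint, so setting it explicitly here is illustrative only:

package main

import "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"

// newIMDSClient builds an IMDS client directly; the functional options
// mutate a copy of Options before the client is constructed.
func newIMDSClient() *imds.Client {
	return imds.New(imds.Options{}, func(o *imds.Options) {
		o.Endpoint = "http://169.254.169.254"
	})
}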
-func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - options.HTTPClient = resolveHTTPClient(options.HTTPClient) - - if options.Retryer == nil { - options.Retryer = retry.NewStandard() - } - options.Retryer = retry.AddWithMaxBackoffDelay(options.Retryer, 1*time.Second) - - if options.ClientEnableState == ClientDefaultEnableState { - if v := os.Getenv(disableClientEnvVar); strings.EqualFold(v, "true") { - options.ClientEnableState = ClientDisabled - } - } - - if len(options.Endpoint) == 0 { - if v := os.Getenv(endpointEnvVar); len(v) != 0 { - options.Endpoint = v - } - } - - client := &Client{ - options: options, - } - - if client.options.tokenProvider == nil && !client.options.disableAPIToken { - client.options.tokenProvider = newTokenProvider(client, defaultTokenTTL) - } - - return client -} - -// NewFromConfig returns an initialized Client based on the AWS SDK config, and -// functional options. Provide additional functional options to further -// configure the behavior of the client, such as changing the client's endpoint -// or adding custom middleware behavior. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...), - HTTPClient: cfg.HTTPClient, - ClientLogMode: cfg.ClientLogMode, - Logger: cfg.Logger, - } - - if cfg.Retryer != nil { - opts.Retryer = cfg.Retryer() - } - - resolveClientEnableState(cfg, &opts) - resolveEndpointConfig(cfg, &opts) - resolveEndpointModeConfig(cfg, &opts) - resolveEnableFallback(cfg, &opts) - - return New(opts, optFns...) -} - -// Options provides the fields for configuring the API client's behavior. -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation - // call to modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The endpoint the client will use to retrieve EC2 instance metadata. - // - // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EndpointMode. - // - // If unset, and the environment variable AWS_EC2_METADATA_SERVICE_ENDPOINT - // has a value the client will use the value of the environment variable as - // the endpoint for operation calls. - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] - Endpoint string - - // The endpoint selection mode the client will use if no explicit endpoint is provided using the Endpoint field. - // - // Setting EndpointMode to EndpointModeStateIPv4 will configure the client to use the default EC2 IPv4 endpoint. - // Setting EndpointMode to EndpointModeStateIPv6 will configure the client to use the default EC2 IPv6 endpoint. - // - // By default, if EndpointMode is not set (EndpointModeStateUnset), the client uses the default endpoint selection mode EndpointModeStateIPv4. - EndpointMode EndpointModeState - - // The HTTP client to invoke API calls with. Defaults to client's default - // HTTP implementation if nil. - HTTPClient HTTPClient - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. - Retryer aws.Retryer - - // Changes if the EC2 Instance Metadata client is enabled or not. Client - // will default to enabled if not set to ClientDisabled. When the client is - // disabled it will return an error for all operation calls.
- // - // If ClientEnableState value is ClientDefaultEnableState (default value), - // and the environment variable "AWS_EC2_METADATA_DISABLED" is set to - // "true", the client will be disabled. - // - // AWS_EC2_METADATA_DISABLED=true - ClientEnableState ClientEnableState - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // Configure IMDSv1 fallback behavior. By default, the client will attempt - // to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary] - // the client will return any errors encountered from attempting to fetch a token - // instead of silently using the insecure data flow of IMDSv1. - // - // See [configuring IMDS] for more information. - // - // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html - EnableFallback aws.Ternary - - // By default, all IMDS client operations enforce a 5-second timeout. You - // can disable that behavior with this setting. - DisableDefaultTimeout bool - - // provides the caching of API tokens used for operation calls. If unset, - // the API token will not be retrieved for the operation. - tokenProvider *tokenProvider - - // option to disable the API token provider for testing. - disableAPIToken bool -} - -// HTTPClient provides the interface for a client making HTTP requests with the -// API. -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Copy creates a copy of the API options. -func (o Options) Copy() Options { - to := o - to.APIOptions = append([]func(*middleware.Stack) error{}, o.APIOptions...) - return to -} - -// WithAPIOptions wraps the API middleware functions, as a functional option -// for the API Client Options. Use this helper to add additional functional -// options to the API client, or operation calls. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) 
- } -} - -func (c *Client) invokeOperation( - ctx context.Context, opID string, params interface{}, optFns []func(*Options), - stackFns ...func(*middleware.Stack, Options) error, -) ( - result interface{}, metadata middleware.Metadata, err error, -) { - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - for _, fn := range optFns { - fn(&options) - } - - if options.ClientEnableState == ClientDisabled { - return nil, metadata, &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: fmt.Errorf( - "access disabled to EC2 IMDS via client option, or %q environment variable", - disableClientEnvVar), - } - } - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) - if err != nil { - return nil, metadata, &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - - return result, metadata, err -} - -const ( - // HTTP client constants - defaultDialerTimeout = 250 * time.Millisecond - defaultResponseHeaderTimeout = 500 * time.Millisecond -) - -func resolveHTTPClient(client HTTPClient) HTTPClient { - if client == nil { - client = awshttp.NewBuildableClient() - } - - if c, ok := client.(*awshttp.BuildableClient); ok { - client = c. - WithDialerOptions(func(d *net.Dialer) { - // Use a custom Dial timeout for the EC2 Metadata service to account - // for the possibility the application might not be running in an - // environment with the service present. The client should fail fast in - // this case. - d.Timeout = defaultDialerTimeout - }). - WithTransportOptions(func(tr *http.Transport) { - // Use a custom Transport timeout for the EC2 Metadata service to - // account for the possibility that the application might be running in - // a container, and EC2Metadata service drops the connection after a - // single IP Hop. The client should fail fast in this case. 
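A sketch of building the client from a shared AWS config via NewFromConfig above; config.LoadDefaultConfig is the standard aws-sdk-go-v2 loader, and error handling is abbreviated:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// clientFromSharedConfig relies on NewFromConfig to copy the retryer,
// logger, and endpoint-related settings out of the loaded aws.Config.
func clientFromSharedConfig(ctx context.Context) *imds.Client {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	return imds.NewFromConfig(cfg)
}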
- tr.ResponseHeaderTimeout = defaultResponseHeaderTimeout - }) - } - - return client -} - -func resolveClientEnableState(cfg aws.Config, options *Options) error { - if options.ClientEnableState != ClientDefaultEnableState { - return nil - } - value, found, err := internalconfig.ResolveClientEnableState(cfg.ConfigSources) - if err != nil || !found { - return err - } - options.ClientEnableState = value - return nil -} - -func resolveEndpointModeConfig(cfg aws.Config, options *Options) error { - if options.EndpointMode != EndpointModeStateUnset { - return nil - } - value, found, err := internalconfig.ResolveEndpointModeConfig(cfg.ConfigSources) - if err != nil || !found { - return err - } - options.EndpointMode = value - return nil -} - -func resolveEndpointConfig(cfg aws.Config, options *Options) error { - if len(options.Endpoint) != 0 { - return nil - } - value, found, err := internalconfig.ResolveEndpointConfig(cfg.ConfigSources) - if err != nil || !found { - return err - } - options.Endpoint = value - return nil -} - -func resolveEnableFallback(cfg aws.Config, options *Options) { - if options.EnableFallback != aws.UnknownTernary { - return - } - - disabled, ok := internalconfig.ResolveV1FallbackDisabled(cfg.ConfigSources) - if !ok { - return - } - - if disabled { - options.EnableFallback = aws.FalseTernary - } else { - options.EnableFallback = aws.TrueTernary - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go deleted file mode 100644 index af58b6bb10..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go +++ /dev/null @@ -1,77 +0,0 @@ -package imds - -import ( - "context" - "fmt" - "io" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getDynamicDataPath = "/latest/dynamic" - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *Client) GetDynamicData(ctx context.Context, params *GetDynamicDataInput, optFns ...func(*Options)) (*GetDynamicDataOutput, error) { - if params == nil { - params = &GetDynamicDataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetDynamicData", params, optFns, - addGetDynamicDataMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetDynamicDataOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetDynamicDataInput provides the input parameters for the GetDynamicData -// operation. -type GetDynamicDataInput struct { - // The relative dynamic data path to retrieve. Can be empty string to - // retrieve a response containing a new line separated list of dynamic data - // resources available. - // - // Must not include the dynamic data base path. - // - // May include leading slash. If Path includes trailing slash the trailing - // slash will be included in the request for the resource. - Path string -} - -// GetDynamicDataOutput provides the output parameters for the GetDynamicData -// operation. 
-type GetDynamicDataOutput struct { - Content io.ReadCloser - - ResultMetadata middleware.Metadata -} - -func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetDynamicData", - buildGetDynamicDataPath, - buildGetDynamicDataOutput) -} - -func buildGetDynamicDataPath(params interface{}) (string, error) { - p, ok := params.(*GetDynamicDataInput) - if !ok { - return "", fmt.Errorf("unknown parameter type %T", params) - } - - return appendURIPath(getDynamicDataPath, p.Path), nil -} - -func buildGetDynamicDataOutput(resp *smithyhttp.Response) (interface{}, error) { - return &GetDynamicDataOutput{ - Content: resp.Body, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go deleted file mode 100644 index 5111cc90ca..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go +++ /dev/null @@ -1,103 +0,0 @@ -package imds - -import ( - "context" - "encoding/json" - "fmt" - "io" - "strings" - "time" - - "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getIAMInfoPath = getMetadataPath + "/iam/info" - -// GetIAMInfo retrieves the IAM information associated with the -// instance. An error is returned if the request fails or the response -// cannot be parsed. -func (c *Client) GetIAMInfo( - ctx context.Context, params *GetIAMInfoInput, optFns ...func(*Options), -) ( - *GetIAMInfoOutput, error, -) { - if params == nil { - params = &GetIAMInfoInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetIAMInfo", params, optFns, - addGetIAMInfoMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetIAMInfoOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetIAMInfoInput provides the input parameters for GetIAMInfo operation. -type GetIAMInfoInput struct{} - -// GetIAMInfoOutput provides the output parameters for GetIAMInfo operation.
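A usage sketch for GetDynamicData as documented above; Content is an io.ReadCloser that the caller must drain and close:

package main

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// dynamicData fetches one dynamic-data resource and returns its body.
func dynamicData(ctx context.Context, client *imds.Client, path string) (string, error) {
	out, err := client.GetDynamicData(ctx, &imds.GetDynamicDataInput{Path: path})
	if err != nil {
		return "", err
	}
	defer out.Content.Close()

	b, err := io.ReadAll(out.Content)
	return string(b), err
}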
-type GetIAMInfoOutput struct { - IAMInfo - - ResultMetadata middleware.Metadata -} - -func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetIAMInfo", - buildGetIAMInfoPath, - buildGetIAMInfoOutput, - ) -} - -func buildGetIAMInfoPath(params interface{}) (string, error) { - return getIAMInfoPath, nil -} - -func buildGetIAMInfoOutput(resp *smithyhttp.Response) (v interface{}, err error) { - defer func() { - closeErr := resp.Body.Close() - if err == nil { - err = closeErr - } else if closeErr != nil { - err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) - } - }() - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(resp.Body, ringBuffer) - - imdsResult := &GetIAMInfoOutput{} - if err = json.NewDecoder(body).Decode(&imdsResult.IAMInfo); err != nil { - return nil, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode instance identity document, %w", err), - Snapshot: ringBuffer.Bytes(), - } - } - // Any code other success is an error - if !strings.EqualFold(imdsResult.Code, "success") { - return nil, fmt.Errorf("failed to get EC2 IMDS IAM info, %s", - imdsResult.Code) - } - - return imdsResult, nil -} - -// IAMInfo provides the shape for unmarshaling an IAM info from the metadata -// API. -type IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go deleted file mode 100644 index dc8c09edf0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go +++ /dev/null @@ -1,110 +0,0 @@ -package imds - -import ( - "context" - "encoding/json" - "fmt" - "io" - "time" - - "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getInstanceIdentityDocumentPath = getDynamicDataPath + "/instance-identity/document" - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *Client) GetInstanceIdentityDocument( - ctx context.Context, params *GetInstanceIdentityDocumentInput, optFns ...func(*Options), -) ( - *GetInstanceIdentityDocumentOutput, error, -) { - if params == nil { - params = &GetInstanceIdentityDocumentInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetInstanceIdentityDocument", params, optFns, - addGetInstanceIdentityDocumentMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetInstanceIdentityDocumentOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetInstanceIdentityDocumentInput provides the input parameters for -// GetInstanceIdentityDocument operation. -type GetInstanceIdentityDocumentInput struct{} - -// GetInstanceIdentityDocumentOutput provides the output parameters for -// GetInstanceIdentityDocument operation. 
-type GetInstanceIdentityDocumentOutput struct { - InstanceIdentityDocument - - ResultMetadata middleware.Metadata -} - -func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetInstanceIdentityDocument", - buildGetInstanceIdentityDocumentPath, - buildGetInstanceIdentityDocumentOutput, - ) -} - -func buildGetInstanceIdentityDocumentPath(params interface{}) (string, error) { - return getInstanceIdentityDocumentPath, nil -} - -func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (v interface{}, err error) { - defer func() { - closeErr := resp.Body.Close() - if err == nil { - err = closeErr - } else if closeErr != nil { - err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) - } - }() - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(resp.Body, ringBuffer) - - output := &GetInstanceIdentityDocumentOutput{} - if err = json.NewDecoder(body).Decode(&output.InstanceIdentityDocument); err != nil { - return nil, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode instance identity document, %w", err), - Snapshot: ringBuffer.Bytes(), - } - } - - return output, nil -} - -// InstanceIdentityDocument provides the shape for unmarshaling -// an instance identity document -type InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - MarketplaceProductCodes []string `json:"marketplaceProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go deleted file mode 100644 index 869bfc9feb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go +++ /dev/null @@ -1,77 +0,0 @@ -package imds - -import ( - "context" - "fmt" - "io" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getMetadataPath = "/latest/meta-data" - -// GetMetadata uses the path provided to request information from the Amazon -// EC2 Instance Metadata Service. The content will be returned as a string, or -// error if the request failed. -func (c *Client) GetMetadata(ctx context.Context, params *GetMetadataInput, optFns ...func(*Options)) (*GetMetadataOutput, error) { - if params == nil { - params = &GetMetadataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetMetadata", params, optFns, - addGetMetadataMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetMetadataOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetMetadataInput provides the input parameters for the GetMetadata -// operation. -type GetMetadataInput struct { - // The relative metadata path to retrieve. 
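A sketch of fetching the instance identity document whose shape is defined above; passing nil params is allowed because the operation substitutes an empty input:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// describeSelf prints two fields of the instance identity document.
func describeSelf(ctx context.Context, client *imds.Client) error {
	doc, err := client.GetInstanceIdentityDocument(ctx, nil)
	if err != nil {
		return err
	}
	fmt.Printf("instance %s running in %s\n", doc.InstanceID, doc.Region)
	return nil
}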
Can be empty string to retrieve - // a response containing a newline separated list of metadata resources - // available. - // - // Must not include the metadata base path. - // - // May include leading slash. If Path includes trailing slash the trailing slash - // will be included in the request for the resource. - Path string -} - -// GetMetadataOutput provides the output parameters for the GetMetadata -// operation. -type GetMetadataOutput struct { - Content io.ReadCloser - - ResultMetadata middleware.Metadata -} - -func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetMetadata", - buildGetMetadataPath, - buildGetMetadataOutput) -} - -func buildGetMetadataPath(params interface{}) (string, error) { - p, ok := params.(*GetMetadataInput) - if !ok { - return "", fmt.Errorf("unknown parameter type %T", params) - } - - return appendURIPath(getMetadataPath, p.Path), nil -} - -func buildGetMetadataOutput(resp *smithyhttp.Response) (interface{}, error) { - return &GetMetadataOutput{ - Content: resp.Body, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go deleted file mode 100644 index 8c0572bb5c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go +++ /dev/null @@ -1,73 +0,0 @@ -package imds - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// GetRegion retrieves the AWS region the instance is running in, derived -// from the instance identity document. An error is returned if the request -// fails or the response cannot be parsed. -func (c *Client) GetRegion( - ctx context.Context, params *GetRegionInput, optFns ...func(*Options), -) ( - *GetRegionOutput, error, -) { - if params == nil { - params = &GetRegionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetRegion", params, optFns, - addGetRegionMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetRegionOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetRegionInput provides the input parameters for GetRegion operation. -type GetRegionInput struct{} - -// GetRegionOutput provides the output parameters for GetRegion operation.
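A sketch of GetMetadata with a relative path, per the Path documentation above; "instance-id" resolves to /latest/meta-data/instance-id:

package main

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// instanceID reads a single metadata value and returns it as a string.
func instanceID(ctx context.Context, client *imds.Client) (string, error) {
	out, err := client.GetMetadata(ctx, &imds.GetMetadataInput{Path: "instance-id"})
	if err != nil {
		return "", err
	}
	defer out.Content.Close()

	b, err := io.ReadAll(out.Content)
	return string(b), err
}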
-type GetRegionOutput struct { - Region string - - ResultMetadata middleware.Metadata -} - -func addGetRegionMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetRegion", - buildGetInstanceIdentityDocumentPath, - buildGetRegionOutput, - ) -} - -func buildGetRegionOutput(resp *smithyhttp.Response) (interface{}, error) { - out, err := buildGetInstanceIdentityDocumentOutput(resp) - if err != nil { - return nil, err - } - - result, ok := out.(*GetInstanceIdentityDocumentOutput) - if !ok { - return nil, fmt.Errorf("unexpected instance identity document type, %T", out) - } - - region := result.Region - if len(region) == 0 { - return "", fmt.Errorf("instance metadata did not return a region value") - } - - return &GetRegionOutput{ - Region: region, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go deleted file mode 100644 index 1f9ee97a5b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go +++ /dev/null @@ -1,119 +0,0 @@ -package imds - -import ( - "context" - "fmt" - "io" - "strconv" - "strings" - "time" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getTokenPath = "/latest/api/token" -const tokenTTLHeader = "X-Aws-Ec2-Metadata-Token-Ttl-Seconds" - -// getToken uses the duration to return a token for EC2 IMDS, or an error if -// the request failed. -func (c *Client) getToken(ctx context.Context, params *getTokenInput, optFns ...func(*Options)) (*getTokenOutput, error) { - if params == nil { - params = &getTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "getToken", params, optFns, - addGetTokenMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*getTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type getTokenInput struct { - TokenTTL time.Duration -} - -type getTokenOutput struct { - Token string - TokenTTL time.Duration - - ResultMetadata middleware.Metadata -} - -func addGetTokenMiddleware(stack *middleware.Stack, options Options) error { - err := addRequestMiddleware(stack, - options, - "PUT", - "GetToken", - buildGetTokenPath, - buildGetTokenOutput) - if err != nil { - return err - } - - err = stack.Serialize.Add(&tokenTTLRequestHeader{}, middleware.After) - if err != nil { - return err - } - - return nil -} - -func buildGetTokenPath(interface{}) (string, error) { - return getTokenPath, nil -} - -func buildGetTokenOutput(resp *smithyhttp.Response) (v interface{}, err error) { - defer func() { - closeErr := resp.Body.Close() - if err == nil { - err = closeErr - } else if closeErr != nil { - err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) - } - }() - - ttlHeader := resp.Header.Get(tokenTTLHeader) - tokenTTL, err := strconv.ParseInt(ttlHeader, 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to parse API token, %w", err) - } - - var token strings.Builder - if _, err = io.Copy(&token, resp.Body); err != nil { - return nil, fmt.Errorf("unable to read API token, %w", err) - } - - return &getTokenOutput{ - Token: token.String(), - TokenTTL: time.Duration(tokenTTL) * time.Second, - }, nil -} - -type tokenTTLRequestHeader struct{} - -func (*tokenTTLRequestHeader) ID() string { return "tokenTTLRequestHeader" } -func (*tokenTTLRequestHeader) HandleSerialize( - ctx 
context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("expect HTTP transport, got %T", in.Request) - } - - input, ok := in.Parameters.(*getTokenInput) - if !ok { - return out, metadata, fmt.Errorf("expect getTokenInput, got %T", in.Parameters) - } - - req.Header.Set(tokenTTLHeader, strconv.Itoa(int(input.TokenTTL/time.Second))) - - return next.HandleSerialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go deleted file mode 100644 index 8903697244..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go +++ /dev/null @@ -1,61 +0,0 @@ -package imds - -import ( - "context" - "io" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getUserDataPath = "/latest/user-data" - -// GetUserData requests the user data made available to the instance. The -// content is returned as a stream, or an error if the request failed. -func (c *Client) GetUserData(ctx context.Context, params *GetUserDataInput, optFns ...func(*Options)) (*GetUserDataOutput, error) { - if params == nil { - params = &GetUserDataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetUserData", params, optFns, - addGetUserDataMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetUserDataOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetUserDataInput provides the input parameters for the GetUserData -// operation. -type GetUserDataInput struct{} - -// GetUserDataOutput provides the output parameters for the GetUserData -// operation.
-type GetUserDataOutput struct { - Content io.ReadCloser - - ResultMetadata middleware.Metadata -} - -func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetUserData", - buildGetUserDataPath, - buildGetUserDataOutput) -} - -func buildGetUserDataPath(params interface{}) (string, error) { - return getUserDataPath, nil -} - -func buildGetUserDataOutput(resp *smithyhttp.Response) (interface{}, error) { - return &GetUserDataOutput{ - Content: resp.Body, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go deleted file mode 100644 index ad283cf825..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go +++ /dev/null @@ -1,48 +0,0 @@ -package imds - -import ( - "context" - "github.com/aws/smithy-go/middleware" -) - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} - -type signRequestMiddleware struct { -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go deleted file mode 100644 index d5765c36b1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package imds provides the API client for interacting with the Amazon EC2 -// Instance Metadata Service. -// -// All Client operation calls have a default timeout. If the operation is not -// completed before this timeout expires, the operation will be canceled. This -// timeout can be overridden through the following: -// - Set the options flag DisableDefaultTimeout -// - Provide a Context with a timeout or deadline when calling the client's operations. -// -// See the EC2 IMDS user guide for more information on using the API.
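Per the package doc above, operations enforce a default timeout unless DisableDefaultTimeout is set or the Context carries its own deadline; a sketch of overriding it with a deadline:

package main

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// userDataWithDeadline gives the operation 10 seconds instead of the default.
func userDataWithDeadline(client *imds.Client) (*imds.GetUserDataOutput, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return client.GetUserData(ctx, nil)
}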
-// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html -package imds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go deleted file mode 100644 index d7540da348..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go +++ /dev/null @@ -1,20 +0,0 @@ -package imds - -import ( - "context" - "github.com/aws/smithy-go/middleware" -) - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go deleted file mode 100644 index a44cd1b79f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package imds - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go deleted file mode 100644 index ce77455893..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go +++ /dev/null @@ -1,114 +0,0 @@ -package config - -import ( - "fmt" - "strings" -) - -// ClientEnableState provides an enumeration if the client is enabled, -// disabled, or default behavior. -type ClientEnableState uint - -// Enumeration values for ClientEnableState -const ( - ClientDefaultEnableState ClientEnableState = iota - ClientDisabled - ClientEnabled -) - -// EndpointModeState is the EC2 IMDS Endpoint Configuration Mode -type EndpointModeState uint - -// Enumeration values for EndpointModeState -const ( - EndpointModeStateUnset EndpointModeState = iota - EndpointModeStateIPv4 - EndpointModeStateIPv6 -) - -// SetFromString sets the EndpointModeState based on the provided string value. An empty value maps to EndpointModeStateUnset; unrecognized values return an error. -func (e *EndpointModeState) SetFromString(v string) error { - v = strings.TrimSpace(v) - - switch { - case len(v) == 0: - *e = EndpointModeStateUnset - case strings.EqualFold(v, "IPv6"): - *e = EndpointModeStateIPv6 - case strings.EqualFold(v, "IPv4"): - *e = EndpointModeStateIPv4 - default: - return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4") - } - return nil - } - -// ClientEnableStateResolver is a config resolver interface for retrieving whether the IMDS client is disabled. -type ClientEnableStateResolver interface { - GetEC2IMDSClientEnableState() (ClientEnableState, bool, error) -} - -// EndpointModeResolver is a config resolver interface for retrieving the EndpointModeState configuration. -type EndpointModeResolver interface { - GetEC2IMDSEndpointMode() (EndpointModeState, bool, error) -} - -// EndpointResolver is a config resolver interface for retrieving the endpoint.
-type EndpointResolver interface { - GetEC2IMDSEndpoint() (string, bool, error) -} - -type v1FallbackDisabledResolver interface { - GetEC2IMDSV1FallbackDisabled() (bool, bool) -} - -// ResolveClientEnableState resolves the ClientEnableState from a list of configuration sources. -func ResolveClientEnableState(sources []interface{}) (value ClientEnableState, found bool, err error) { - for _, source := range sources { - if resolver, ok := source.(ClientEnableStateResolver); ok { - value, found, err = resolver.GetEC2IMDSClientEnableState() - if err != nil || found { - return value, found, err - } - } - } - return value, found, err -} - -// ResolveEndpointModeConfig resolves the EndpointModeState from a list of configuration sources. -func ResolveEndpointModeConfig(sources []interface{}) (value EndpointModeState, found bool, err error) { - for _, source := range sources { - if resolver, ok := source.(EndpointModeResolver); ok { - value, found, err = resolver.GetEC2IMDSEndpointMode() - if err != nil || found { - return value, found, err - } - } - } - return value, found, err -} - -// ResolveEndpointConfig resolves the endpoint from a list of configuration sources. -func ResolveEndpointConfig(sources []interface{}) (value string, found bool, err error) { - for _, source := range sources { - if resolver, ok := source.(EndpointResolver); ok { - value, found, err = resolver.GetEC2IMDSEndpoint() - if err != nil || found { - return value, found, err - } - } - } - return value, found, err -} - -// ResolveV1FallbackDisabled resolves whether IMDSv1 fallback has been disabled from a list of configuration sources. -func ResolveV1FallbackDisabled(sources []interface{}) (bool, bool) { - for _, source := range sources { - if resolver, ok := source.(v1FallbackDisabledResolver); ok { - if v, found := resolver.GetEC2IMDSV1FallbackDisabled(); found { - return v, true - } - } - } - return false, false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go deleted file mode 100644 index 90cf4aeb3d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go +++ /dev/null @@ -1,313 +0,0 @@ -package imds - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "net/url" - "path" - "time" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -func addAPIRequestMiddleware(stack *middleware.Stack, - options Options, - operation string, - getPath func(interface{}) (string, error), - getOutput func(*smithyhttp.Response) (interface{}, error), -) (err error) { - err = addRequestMiddleware(stack, options, "GET", operation, getPath, getOutput) - if err != nil { - return err - } - - // Token Serializer build and state management.
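The Resolve* helpers above discover settings by type-asserting each entry of the sources slice, so any value exposing the matching method participates; a sketch with a hypothetical source type:

package config

// staticEndpoint is a hypothetical config source used only for illustration.
type staticEndpoint string

// GetEC2IMDSEndpoint satisfies the EndpointResolver interface defined above.
func (s staticEndpoint) GetEC2IMDSEndpoint() (string, bool, error) {
	return string(s), true, nil
}

// ResolveEndpointConfig([]interface{}{staticEndpoint("http://[fd00:ec2::254]")})
// would then return that endpoint with found == true.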
- if !options.disableAPIToken { - err = stack.Finalize.Insert(options.tokenProvider, (*retry.Attempt)(nil).ID(), middleware.After) - if err != nil { - return err - } - - err = stack.Deserialize.Insert(options.tokenProvider, "OperationDeserializer", middleware.Before) - if err != nil { - return err - } - } - - return nil -} - -func addRequestMiddleware(stack *middleware.Stack, - options Options, - method string, - operation string, - getPath func(interface{}) (string, error), - getOutput func(*smithyhttp.Response) (interface{}, error), -) (err error) { - err = awsmiddleware.AddSDKAgentKey(awsmiddleware.FeatureMetadata, "ec2-imds")(stack) - if err != nil { - return err - } - - // Operation timeout - err = stack.Initialize.Add(&operationTimeout{ - Disabled: options.DisableDefaultTimeout, - DefaultTimeout: defaultOperationTimeout, - }, middleware.Before) - if err != nil { - return err - } - - // Operation Serializer - err = stack.Serialize.Add(&serializeRequest{ - GetPath: getPath, - Method: method, - }, middleware.After) - if err != nil { - return err - } - - // Operation endpoint resolver - err = stack.Serialize.Insert(&resolveEndpoint{ - Endpoint: options.Endpoint, - EndpointMode: options.EndpointMode, - }, "OperationSerializer", middleware.Before) - if err != nil { - return err - } - - // Operation Deserializer - err = stack.Deserialize.Add(&deserializeResponse{ - GetOutput: getOutput, - }, middleware.After) - if err != nil { - return err - } - - err = stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: options.ClientLogMode.IsRequest(), - LogRequestWithBody: options.ClientLogMode.IsRequestWithBody(), - LogResponse: options.ClientLogMode.IsResponse(), - LogResponseWithBody: options.ClientLogMode.IsResponseWithBody(), - }, middleware.After) - if err != nil { - return err - } - - err = addSetLoggerMiddleware(stack, options) - if err != nil { - return err - } - - if err := addProtocolFinalizerMiddlewares(stack, options, operation); err != nil { - return fmt.Errorf("add protocol finalizers: %w", err) - } - - // Retry support - return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{ - Retryer: options.Retryer, - LogRetryAttempts: options.ClientLogMode.IsRetries(), - }) -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -type serializeRequest struct { - GetPath func(interface{}) (string, error) - Method string -} - -func (*serializeRequest) ID() string { - return "OperationSerializer" -} - -func (m *serializeRequest) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - reqPath, err := m.GetPath(in.Parameters) - if err != nil { - return out, metadata, fmt.Errorf("unable to get request URL path, %w", err) - } - - request.Request.URL.Path = reqPath - request.Request.Method = m.Method - - return next.HandleSerialize(ctx, in) -} - -type deserializeResponse struct { - GetOutput func(*smithyhttp.Response) (interface{}, error) -} - -func (*deserializeResponse) ID() string { - return "OperationDeserializer" -} - -func (m *deserializeResponse) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata 
middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, fmt.Errorf( - "unexpected transport response type, %T, want %T", out.RawResponse, resp) - } - defer resp.Body.Close() - - // read the full body so that any operation timeout cleanup will not race - // the body being read. - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return out, metadata, fmt.Errorf("read response body failed, %w", err) - } - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) - - // Any status code outside the 200-299 range is an error - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return out, metadata, &smithyhttp.ResponseError{ - Response: resp, - Err: fmt.Errorf("request to EC2 IMDS failed"), - } - } - - result, err := m.GetOutput(resp) - if err != nil { - return out, metadata, fmt.Errorf( - "unable to get deserialized result for response, %w", err, - ) - } - out.Result = result - - return out, metadata, err -} - -type resolveEndpoint struct { - Endpoint string - EndpointMode EndpointModeState -} - -func (*resolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *resolveEndpoint) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - var endpoint string - if len(m.Endpoint) > 0 { - endpoint = m.Endpoint - } else { - switch m.EndpointMode { - case EndpointModeStateIPv6: - endpoint = defaultIPv6Endpoint - case EndpointModeStateIPv4: - fallthrough - case EndpointModeStateUnset: - endpoint = defaultIPv4Endpoint - default: - return out, metadata, fmt.Errorf("unsupported IMDS endpoint mode") - } - } - - req.URL, err = url.Parse(endpoint) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - return next.HandleSerialize(ctx, in) -} - -const ( - defaultOperationTimeout = 5 * time.Second -) - -// operationTimeout adds a timeout on the middleware stack if the Context the -// stack was called with does not have a deadline. The next middleware must -// complete before the timeout, or the context will be canceled. -// -// If DefaultTimeout is zero, no default timeout will be used if the Context -// does not have a timeout. -// -// The next middleware must also ensure that any resources that are also -// canceled by the stack's context are completely consumed before returning. -// Otherwise the timeout cleanup will race the resource being consumed -// upstream.
-type operationTimeout struct { - Disabled bool - DefaultTimeout time.Duration -} - -func (*operationTimeout) ID() string { return "OperationTimeout" } - -func (m *operationTimeout) HandleInitialize( - ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler, -) ( - output middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.Disabled { - return next.HandleInitialize(ctx, input) - } - - if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 { - var cancelFn func() - ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout) - defer cancelFn() - } - - return next.HandleInitialize(ctx, input) -} - -// appendURIPath joins a URI path component to the existing path with `/` -// separators between the path components. If the path being added ends with a -// trailing `/` that slash will be maintained. -func appendURIPath(base, add string) string { - reqPath := path.Join(base, add) - if len(add) != 0 && add[len(add)-1] == '/' { - reqPath += "/" - } - return reqPath -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %w", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %w", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go deleted file mode 100644 index 5703c6e16a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go +++ /dev/null @@ -1,261 +0,0 @@ -package imds - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/logging" - "net/http" - "sync" - "sync/atomic" - "time" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const ( - // Headers for Token and TTL - tokenHeader = "x-aws-ec2-metadata-token" - defaultTokenTTL = 5 * time.Minute -) - -type tokenProvider struct { - client *Client - tokenTTL time.Duration - - token *apiToken - tokenMux sync.RWMutex - - disabled uint32 // Atomically updated -} - -func newTokenProvider(client *Client, ttl time.Duration) *tokenProvider { - return &tokenProvider{ - client: client, - tokenTTL: ttl, - } -} - -// apiToken provides the API token used by all operation calls for the EC2 -// Instance metadata service. -type apiToken struct { - token string - expires time.Time -} - -var timeNow = time.Now - -// Expired reports whether the token is expired. -func (t *apiToken) Expired() bool { - // Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry - // time is always based on reported wall-clock time.
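An illustration of the appendURIPath semantics described above (within package imds): components are joined with "/" and a trailing slash on the added component is preserved:

func exampleAppendURIPath() {
	p1 := appendURIPath("/latest/meta-data", "iam/info") // "/latest/meta-data/iam/info"
	p2 := appendURIPath("/latest/meta-data", "ami/")     // trailing slash kept: "/latest/meta-data/ami/"
	_, _ = p1, p2
}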
- return timeNow().Round(0).After(t.expires) -} - -func (t *tokenProvider) ID() string { return "APITokenProvider" } - -// HandleFinalize is the finalize stack middleware, that if the token provider is -// enabled, will attempt to add the cached API token to the request. If the API -// token is not cached, it will be retrieved in a separate API call, getToken. -// -// For retry attempts, handler must be added after attempt retryer. -// -// If request for getToken fails the token provider may be disabled from future -// requests, depending on the response status code. -func (t *tokenProvider) HandleFinalize( - ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if t.fallbackEnabled() && !t.enabled() { - // short-circuits to insecure data flow if token provider is disabled. - return next.HandleFinalize(ctx, input) - } - - req, ok := input.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport request type %T", input.Request) - } - - tok, err := t.getToken(ctx) - if err != nil { - // If the error allows the token to downgrade to insecure flow allow that. - var bypassErr *bypassTokenRetrievalError - if errors.As(err, &bypassErr) { - return next.HandleFinalize(ctx, input) - } - - return out, metadata, fmt.Errorf("failed to get API token, %w", err) - } - - req.Header.Set(tokenHeader, tok.token) - - return next.HandleFinalize(ctx, input) -} - -// HandleDeserialize is the deserialize stack middleware for determining if the -// operation the token provider is decorating failed because of a 401 -// unauthorized status code. If the operation failed for that reason the token -// provider needs to be re-enabled so that it can start adding the API token to -// operation calls. -func (t *tokenProvider) HandleDeserialize( - ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, input) - if err == nil { - return out, metadata, err - } - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, fmt.Errorf("expect HTTP transport, got %T", out.RawResponse) - } - - if resp.StatusCode == http.StatusUnauthorized { // unauthorized - t.enable() - err = &retryableError{Err: err, isRetryable: true} - } - - return out, metadata, err -} - -func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) { - if t.fallbackEnabled() && !t.enabled() { - return nil, &bypassTokenRetrievalError{ - Err: fmt.Errorf("cannot get API token, provider disabled"), - } - } - - t.tokenMux.RLock() - tok = t.token - t.tokenMux.RUnlock() - - if tok != nil && !tok.Expired() { - return tok, nil - } - - tok, err = t.updateToken(ctx) - if err != nil { - return nil, err - } - - return tok, nil -} - -func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) { - t.tokenMux.Lock() - defer t.tokenMux.Unlock() - - // Prevent multiple requests to update retrieving the token. 
- if t.token != nil && !t.token.Expired() { - tok := t.token - return tok, nil - } - - result, err := t.client.getToken(ctx, &getTokenInput{ - TokenTTL: t.tokenTTL, - }) - if err != nil { - var statusErr interface{ HTTPStatusCode() int } - if errors.As(err, &statusErr) { - switch statusErr.HTTPStatusCode() { - // Disable future get token if failed because of 403, 404, or 405 - case http.StatusForbidden, - http.StatusNotFound, - http.StatusMethodNotAllowed: - - if t.fallbackEnabled() { - logger := middleware.GetLogger(ctx) - logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err) - t.disable() - } - - // 400 errors are terminal, and need to be upstreamed - case http.StatusBadRequest: - return nil, err - } - } - - // Disable if request send failed or timed out getting response - var re *smithyhttp.RequestSendError - var ce *smithy.CanceledError - if errors.As(err, &re) || errors.As(err, &ce) { - atomic.StoreUint32(&t.disabled, 1) - } - - if !t.fallbackEnabled() { - // NOTE: getToken() is an implementation detail of some outer operation - // (e.g. GetMetadata). It has its own retries that have already been exhausted. - // Mark the underlying error as a terminal error. - err = &retryableError{Err: err, isRetryable: false} - return nil, err - } - - // Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request - // and allow the request to proceed. Future requests _may_ re-attempt fetching a - // token if not disabled. - return nil, &bypassTokenRetrievalError{Err: err} - } - - tok := &apiToken{ - token: result.Token, - expires: timeNow().Add(result.TokenTTL), - } - t.token = tok - - return tok, nil -} - -// enabled returns if the token provider is currently enabled or not. -func (t *tokenProvider) enabled() bool { - return atomic.LoadUint32(&t.disabled) == 0 -} - -// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise -func (t *tokenProvider) fallbackEnabled() bool { - switch t.client.options.EnableFallback { - case aws.FalseTernary: - return false - default: - return true - } -} - -// disable disables the token provider and it will no longer attempt to inject -// the token, nor request updates. -func (t *tokenProvider) disable() { - atomic.StoreUint32(&t.disabled, 1) -} - -// enable enables the token provider to start refreshing tokens, and adding them -// to the pending request.
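The getToken/updateToken path above makes the IMDSv1 downgrade one-way: once disabled, only a 401 re-enables the provider. From the caller's side the behavior hangs off the client's EnableFallback option. A minimal sketch using only the public imds and aws packages (the metadata path and handling here are illustrative, not part of this change):

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	// Pin the client to IMDSv2: with fallback disabled, a 403/404/405 on the
	// token request fails the call instead of degrading to the insecure flow.
	client := imds.New(imds.Options{
		EnableFallback: aws.FalseTernary,
	})

	out, err := client.GetMetadata(context.Background(), &imds.GetMetadataInput{Path: "instance-id"})
	if err != nil {
		log.Fatalf("metadata lookup failed (terminal when no token can be retrieved): %v", err)
	}
	defer out.Content.Close()

	id, _ := io.ReadAll(out.Content)
	fmt.Println(string(id))
}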
-func (t *tokenProvider) enable() { - t.tokenMux.Lock() - t.token = nil - t.tokenMux.Unlock() - atomic.StoreUint32(&t.disabled, 0) -} - -type bypassTokenRetrievalError struct { - Err error -} - -func (e *bypassTokenRetrievalError) Error() string { - return fmt.Sprintf("bypass token retrieval, %v", e.Err) -} - -func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err } - -type retryableError struct { - Err error - isRetryable bool -} - -func (e *retryableError) RetryableError() bool { return e.isRetryable } - -func (e *retryableError) Error() string { return e.Err.Error() } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go deleted file mode 100644 index 0b81db5480..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go +++ /dev/null @@ -1,45 +0,0 @@ -package auth - -import ( - "github.com/aws/smithy-go/auth" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// HTTPAuthScheme is the SDK's internal implementation of smithyhttp.AuthScheme -// for pre-existing implementations where the signer was added to client -// config. SDK clients will key off of this type and ensure per-operation -// updates to those signers persist on the scheme itself. -type HTTPAuthScheme struct { - schemeID string - signer smithyhttp.Signer -} - -var _ smithyhttp.AuthScheme = (*HTTPAuthScheme)(nil) - -// NewHTTPAuthScheme returns an auth scheme instance with the given config. -func NewHTTPAuthScheme(schemeID string, signer smithyhttp.Signer) *HTTPAuthScheme { - return &HTTPAuthScheme{ - schemeID: schemeID, - signer: signer, - } -} - -// SchemeID identifies the auth scheme. -func (s *HTTPAuthScheme) SchemeID() string { - return s.schemeID -} - -// IdentityResolver gets the identity resolver for the auth scheme. -func (s *HTTPAuthScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver { - return o.GetIdentityResolver(s.schemeID) -} - -// Signer gets the signer for the auth scheme. -func (s *HTTPAuthScheme) Signer() smithyhttp.Signer { - return s.signer -} - -// WithSigner returns a new instance of the auth scheme with the updated signer. -func (s *HTTPAuthScheme) WithSigner(signer smithyhttp.Signer) *HTTPAuthScheme { - return NewHTTPAuthScheme(s.schemeID, signer) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go deleted file mode 100644 index bbc2ec06ec..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go +++ /dev/null @@ -1,191 +0,0 @@ -package auth - -import ( - "context" - "fmt" - - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -// SigV4 is a constant representing -// Authentication Scheme Signature Version 4 -const SigV4 = "sigv4" - -// SigV4A is a constant representing -// Authentication Scheme Signature Version 4A -const SigV4A = "sigv4a" - -// SigV4S3Express identifies the S3 S3Express auth scheme. 
-const SigV4S3Express = "sigv4-s3express" - -// None is a constant representing the -// None Authentication Scheme -const None = "none" - -// SupportedSchemes is a data structure -// that indicates the list of supported AWS -// authentication schemes -var SupportedSchemes = map[string]bool{ - SigV4: true, - SigV4A: true, - SigV4S3Express: true, - None: true, -} - -// AuthenticationScheme is a representation of -// AWS authentication schemes -type AuthenticationScheme interface { - isAuthenticationScheme() -} - -// AuthenticationSchemeV4 is an AWS SigV4 representation -type AuthenticationSchemeV4 struct { - Name string - SigningName *string - SigningRegion *string - DisableDoubleEncoding *bool -} - -func (a *AuthenticationSchemeV4) isAuthenticationScheme() {} - -// AuthenticationSchemeV4A is an AWS SigV4A representation -type AuthenticationSchemeV4A struct { - Name string - SigningName *string - SigningRegionSet []string - DisableDoubleEncoding *bool -} - -func (a *AuthenticationSchemeV4A) isAuthenticationScheme() {} - -// AuthenticationSchemeNone is a representation for the none auth scheme -type AuthenticationSchemeNone struct{} - -func (a *AuthenticationSchemeNone) isAuthenticationScheme() {} - -// NoAuthenticationSchemesFoundError is used in signaling -// that no authentication schemes have been specified. -type NoAuthenticationSchemesFoundError struct{} - -func (e *NoAuthenticationSchemesFoundError) Error() string { - return fmt.Sprint("No authentication schemes specified.") -} - -// UnSupportedAuthenticationSchemeSpecifiedError is used in -// signaling that only unsupported authentication schemes -// were specified. -type UnSupportedAuthenticationSchemeSpecifiedError struct { - UnsupportedSchemes []string -} - -func (e *UnSupportedAuthenticationSchemeSpecifiedError) Error() string { - return fmt.Sprint("Unsupported authentication scheme specified.") -} - -// GetAuthenticationSchemes extracts the relevant authentication scheme data -// into a custom strongly typed Go data structure.
-func GetAuthenticationSchemes(p *smithy.Properties) ([]AuthenticationScheme, error) { - var result []AuthenticationScheme - if !p.Has("authSchemes") { - return nil, &NoAuthenticationSchemesFoundError{} - } - - authSchemes, _ := p.Get("authSchemes").([]interface{}) - - var unsupportedSchemes []string - for _, scheme := range authSchemes { - authScheme, _ := scheme.(map[string]interface{}) - - version := authScheme["name"].(string) - switch version { - case SigV4, SigV4S3Express: - v4Scheme := AuthenticationSchemeV4{ - Name: version, - SigningName: getSigningName(authScheme), - SigningRegion: getSigningRegion(authScheme), - DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), - } - result = append(result, AuthenticationScheme(&v4Scheme)) - case SigV4A: - v4aScheme := AuthenticationSchemeV4A{ - Name: SigV4A, - SigningName: getSigningName(authScheme), - SigningRegionSet: getSigningRegionSet(authScheme), - DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), - } - result = append(result, AuthenticationScheme(&v4aScheme)) - case None: - noneScheme := AuthenticationSchemeNone{} - result = append(result, AuthenticationScheme(&noneScheme)) - default: - unsupportedSchemes = append(unsupportedSchemes, authScheme["name"].(string)) - continue - } - } - - if len(result) == 0 { - return nil, &UnSupportedAuthenticationSchemeSpecifiedError{ - UnsupportedSchemes: unsupportedSchemes, - } - } - - return result, nil -} - -type disableDoubleEncoding struct{} - -// SetDisableDoubleEncoding sets or modifies the disable double encoding option -// on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetDisableDoubleEncoding(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, disableDoubleEncoding{}, value) -} - -// GetDisableDoubleEncoding retrieves the disable double encoding option -// from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. 
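GetAuthenticationSchemes only inspects the untyped authSchemes property bag, so its contract is easy to pin down in an in-package test. A hypothetical sketch: the property shape mirrors what the endpoint rules engine emits, and the signingName/signingRegion values are invented.

package auth

import (
	"testing"

	"github.com/aws/smithy-go"
)

func TestGetAuthenticationSchemesSketch(t *testing.T) {
	var props smithy.Properties
	// One SigV4 entry, shaped like the rules engine's output.
	props.Set("authSchemes", []interface{}{
		map[string]interface{}{
			"name":          "sigv4",
			"signingName":   "s3",        // invented service
			"signingRegion": "us-east-1", // invented region
		},
	})

	schemes, err := GetAuthenticationSchemes(&props)
	if err != nil {
		t.Fatal(err)
	}
	if len(schemes) != 1 {
		t.Fatalf("expected one scheme, got %d", len(schemes))
	}

	v4Scheme, ok := schemes[0].(*AuthenticationSchemeV4)
	if !ok {
		t.Fatalf("unexpected scheme type %T", schemes[0])
	}
	if *v4Scheme.SigningName != "s3" || *v4Scheme.SigningRegion != "us-east-1" {
		t.Error("unexpected signing configuration")
	}
}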
-func GetDisableDoubleEncoding(ctx context.Context) (value bool, ok bool) { - value, ok = middleware.GetStackValue(ctx, disableDoubleEncoding{}).(bool) - return value, ok -} - -func getSigningName(authScheme map[string]interface{}) *string { - signingName, ok := authScheme["signingName"].(string) - if !ok || signingName == "" { - return nil - } - return &signingName -} - -func getSigningRegionSet(authScheme map[string]interface{}) []string { - untypedSigningRegionSet, ok := authScheme["signingRegionSet"].([]interface{}) - if !ok { - return nil - } - signingRegionSet := []string{} - for _, item := range untypedSigningRegionSet { - signingRegionSet = append(signingRegionSet, item.(string)) - } - return signingRegionSet -} - -func getSigningRegion(authScheme map[string]interface{}) *string { - signingRegion, ok := authScheme["signingRegion"].(string) - if !ok || signingRegion == "" { - return nil - } - return &signingRegion -} - -func getDisableDoubleEncoding(authScheme map[string]interface{}) *bool { - disableDoubleEncoding, ok := authScheme["disableDoubleEncoding"].(bool) - if !ok { - return nil - } - return &disableDoubleEncoding -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go deleted file mode 100644 index f059b5d391..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go +++ /dev/null @@ -1,43 +0,0 @@ -package smithy - -import ( - "context" - "fmt" - "time" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/auth/bearer" -) - -// BearerTokenAdapter adapts smithy bearer.Token to smithy auth.Identity. -type BearerTokenAdapter struct { - Token bearer.Token -} - -var _ auth.Identity = (*BearerTokenAdapter)(nil) - -// Expiration returns the time of expiration for the token. -func (v *BearerTokenAdapter) Expiration() time.Time { - return v.Token.Expires -} - -// BearerTokenProviderAdapter adapts smithy bearer.TokenProvider to smithy -// auth.IdentityResolver. -type BearerTokenProviderAdapter struct { - Provider bearer.TokenProvider -} - -var _ (auth.IdentityResolver) = (*BearerTokenProviderAdapter)(nil) - -// GetIdentity retrieves a bearer token using the underlying provider. -func (v *BearerTokenProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( - auth.Identity, error, -) { - token, err := v.Provider.RetrieveBearerToken(ctx) - if err != nil { - return nil, fmt.Errorf("get token: %w", err) - } - - return &BearerTokenAdapter{Token: token}, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go deleted file mode 100644 index a88281527c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go +++ /dev/null @@ -1,35 +0,0 @@ -package smithy - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/auth/bearer" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// BearerTokenSignerAdapter adapts smithy bearer.Signer to smithy http -// auth.Signer. 
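The provider adapter above is a thin bridge: resolve a bearer token, wrap it as an identity. A hypothetical in-package sketch using smithy-go's StaticTokenProvider (the token value is invented):

package smithy

import (
	"context"
	"testing"

	"github.com/aws/smithy-go"
	"github.com/aws/smithy-go/auth/bearer"
)

func TestBearerProviderAdapterSketch(t *testing.T) {
	// A fixed-token provider from smithy-go; the value is invented.
	provider := bearer.StaticTokenProvider{
		Token: bearer.Token{Value: "example-token"},
	}

	resolver := &BearerTokenProviderAdapter{Provider: provider}
	identity, err := resolver.GetIdentity(context.Background(), smithy.Properties{})
	if err != nil {
		t.Fatal(err)
	}

	// GetIdentity wraps the resolved token in a BearerTokenAdapter.
	adapted, ok := identity.(*BearerTokenAdapter)
	if !ok {
		t.Fatalf("unexpected identity type %T", identity)
	}
	if adapted.Token.Value != "example-token" {
		t.Errorf("unexpected token %q", adapted.Token.Value)
	}
}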
-type BearerTokenSignerAdapter struct { - Signer bearer.Signer -} - -var _ (smithyhttp.Signer) = (*BearerTokenSignerAdapter)(nil) - -// SignRequest signs the request with the provided bearer token. -func (v *BearerTokenSignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, _ smithy.Properties) error { - ca, ok := identity.(*BearerTokenAdapter) - if !ok { - return fmt.Errorf("unexpected identity type: %T", identity) - } - - signed, err := v.Signer.SignWithBearerToken(ctx, ca.Token, r) - if err != nil { - return fmt.Errorf("sign request: %w", err) - } - - *r = *signed.(*smithyhttp.Request) - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go deleted file mode 100644 index f926c4aaa7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go +++ /dev/null @@ -1,46 +0,0 @@ -package smithy - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" -) - -// CredentialsAdapter adapts aws.Credentials to auth.Identity. -type CredentialsAdapter struct { - Credentials aws.Credentials -} - -var _ auth.Identity = (*CredentialsAdapter)(nil) - -// Expiration returns the time of expiration for the credentials. -func (v *CredentialsAdapter) Expiration() time.Time { - return v.Credentials.Expires -} - -// CredentialsProviderAdapter adapts aws.CredentialsProvider to auth.IdentityResolver. -type CredentialsProviderAdapter struct { - Provider aws.CredentialsProvider -} - -var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil) - -// GetIdentity retrieves AWS credentials using the underlying provider. -func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( - auth.Identity, error, -) { - if v.Provider == nil { - return &CredentialsAdapter{Credentials: aws.Credentials{}}, nil - } - - creds, err := v.Provider.Retrieve(ctx) - if err != nil { - return nil, fmt.Errorf("get credentials: %w", err) - } - - return &CredentialsAdapter{Credentials: creds}, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go deleted file mode 100644 index 42b4586733..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package smithy adapts concrete AWS auth and signing types to the generic smithy versions. -package smithy diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go deleted file mode 100644 index 0c5a2d40c9..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go +++ /dev/null @@ -1,53 +0,0 @@ -package smithy - -import ( - "context" - "fmt" - - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// V4SignerAdapter adapts v4.HTTPSigner to smithy http.Signer. 
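A hypothetical in-package sketch of driving the V4SignerAdapter defined next: static (invented) credentials are wrapped in CredentialsAdapter, and the sketch assumes smithy-go exposes SetSigV4SigningName/SetSigV4SigningRegion setters matching the getters SignRequest reads.

package smithy

import (
	"context"
	"net/http"
	"testing"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/smithy-go"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func TestV4SignerAdapterSketch(t *testing.T) {
	signer := &V4SignerAdapter{Signer: v4.NewSigner()}

	hr, err := http.NewRequest(http.MethodGet, "https://service.us-east-1.amazonaws.com/", nil)
	if err != nil {
		t.Fatal(err)
	}
	req := &smithyhttp.Request{Request: hr}

	// Invented static credentials; test-only values.
	identity := &CredentialsAdapter{Credentials: aws.Credentials{
		AccessKeyID:     "AKIDEXAMPLE",
		SecretAccessKey: "SECRETEXAMPLE",
	}}

	var props smithy.Properties
	// Assumption: these setters mirror the GetSigV4Signing* getters used in SignRequest.
	smithyhttp.SetSigV4SigningName(&props, "service")
	smithyhttp.SetSigV4SigningRegion(&props, "us-east-1")

	if err := signer.SignRequest(context.Background(), req, identity, props); err != nil {
		t.Fatal(err)
	}
	if req.Header.Get("Authorization") == "" {
		t.Error("expected a SigV4 Authorization header")
	}
}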
-type V4SignerAdapter struct { - Signer v4.HTTPSigner - Logger logging.Logger - LogSigning bool -} - -var _ (smithyhttp.Signer) = (*V4SignerAdapter)(nil) - -// SignRequest signs the request with the provided identity. -func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { - ca, ok := identity.(*CredentialsAdapter) - if !ok { - return fmt.Errorf("unexpected identity type: %T", identity) - } - - name, ok := smithyhttp.GetSigV4SigningName(&props) - if !ok { - return fmt.Errorf("sigv4 signing name is required") - } - - region, ok := smithyhttp.GetSigV4SigningRegion(&props) - if !ok { - return fmt.Errorf("sigv4 signing region is required") - } - - hash := v4.GetPayloadHash(ctx) - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) { - o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) - - o.Logger = v.Logger - o.LogSigning = v.LogSigning - }) - if err != nil { - return fmt.Errorf("sign http: %w", err) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md deleted file mode 100644 index 86f5b13725..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ /dev/null @@ -1,276 +0,0 @@ -# v1.3.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2024-03-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.43 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.42 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.41 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.40 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.39 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.38 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.37 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.36 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.35 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.34 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.33 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.32 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.31 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.30 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.29 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.28 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.27 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.26 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.25 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.24 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.23 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.22 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.21 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.20 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.19 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.18 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.17 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.16 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.15 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.14 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.13 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.12 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.11 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.10 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.9 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.8 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.7 (2022-03-23) - -* 
**Dependency Update**: Updated to the latest SDK module versions - -# v1.1.6 (2022-03-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.5 (2022-02-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.4 (2022-01-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.3 (2022-01-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.7 (2021-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.6 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.5 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.4 (2021-08-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.2 (2021-08-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.1 (2021-07-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.0 (2021-06-25) - -* **Release**: Release new modules -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go deleted file mode 100644 index cd4d19b898..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go +++ /dev/null @@ -1,65 +0,0 @@ -package configsources - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" -) - -// EnableEndpointDiscoveryProvider is an interface for retrieving external configuration value -// for Enable Endpoint Discovery -type EnableEndpointDiscoveryProvider interface { - GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) -} - -// ResolveEnableEndpointDiscovery extracts the first instance of a EnableEndpointDiscoveryProvider from the config slice. -// Additionally returns a aws.EndpointDiscoveryEnableState to indicate if the value was found in provided configs, -// and error if one is encountered. 
-func ResolveEnableEndpointDiscovery(ctx context.Context, configs []interface{}) (value aws.EndpointDiscoveryEnableState, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(EnableEndpointDiscoveryProvider); ok { - value, found, err = p.GetEnableEndpointDiscovery(ctx) - if err != nil || found { - break - } - } - } - return -} - -// UseDualStackEndpointProvider is an interface for retrieving external configuration values for UseDualStackEndpoint -type UseDualStackEndpointProvider interface { - GetUseDualStackEndpoint(context.Context) (value aws.DualStackEndpointState, found bool, err error) -} - -// ResolveUseDualStackEndpoint extracts the first instance of a UseDualStackEndpoint from the config slice. -// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. -func ResolveUseDualStackEndpoint(ctx context.Context, configs []interface{}) (value aws.DualStackEndpointState, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(UseDualStackEndpointProvider); ok { - value, found, err = p.GetUseDualStackEndpoint(ctx) - if err != nil || found { - break - } - } - } - return -} - -// UseFIPSEndpointProvider is an interface for retrieving external configuration values for UseFIPSEndpoint -type UseFIPSEndpointProvider interface { - GetUseFIPSEndpoint(context.Context) (value aws.FIPSEndpointState, found bool, err error) -} - -// ResolveUseFIPSEndpoint extracts the first instance of a UseFIPSEndpointProvider from the config slice. -// Additionally, returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. -func ResolveUseFIPSEndpoint(ctx context.Context, configs []interface{}) (value aws.FIPSEndpointState, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(UseFIPSEndpointProvider); ok { - value, found, err = p.GetUseFIPSEndpoint(ctx) - if err != nil || found { - break - } - } - } - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go deleted file mode 100644 index e7835f8524..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go +++ /dev/null @@ -1,57 +0,0 @@ -package configsources - -import ( - "context" -) - -// ServiceBaseEndpointProvider is needed to search for all providers -// that provide a configured service endpoint -type ServiceBaseEndpointProvider interface { - GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) -} - -// IgnoreConfiguredEndpointsProvider is needed to search for all providers -// that provide a flag to disable configured endpoints. -// -// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because -// service packages cannot import github.com/aws/aws-sdk-go-v2/config -// due to result import cycle error. -type IgnoreConfiguredEndpointsProvider interface { - GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) -} - -// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured -// endpoints feature. -// -// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because -// service packages cannot import github.com/aws/aws-sdk-go-v2/config -// due to result import cycle error. 
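Each Resolve* helper walks the config slice in order and stops at the first source that reports found, so precedence is purely positional. A hypothetical in-package sketch against the FIPS resolver above (staticFIPS is invented for illustration):

package configsources

import (
	"context"
	"testing"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// staticFIPS is an invented config source that always reports a FIPS preference.
type staticFIPS struct{ state aws.FIPSEndpointState }

func (s staticFIPS) GetUseFIPSEndpoint(context.Context) (aws.FIPSEndpointState, bool, error) {
	return s.state, true, nil
}

func TestResolveUseFIPSEndpointSketch(t *testing.T) {
	value, found, err := ResolveUseFIPSEndpoint(context.Background(), []interface{}{
		struct{}{}, // sources that don't implement the provider interface are skipped
		staticFIPS{state: aws.FIPSEndpointStateEnabled}, // first provider that reports found wins
	})
	if err != nil || !found {
		t.Fatalf("expected a resolved value, found=%v err=%v", found, err)
	}
	if value != aws.FIPSEndpointStateEnabled {
		t.Errorf("unexpected state %v", value)
	}
}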
-func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { - value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ResolveServiceBaseEndpoint is used to retrieve service endpoints from configured sources -// while allowing for configured endpoints to be disabled -func ResolveServiceBaseEndpoint(ctx context.Context, sdkID string, configs []interface{}) (value string, found bool, err error) { - if val, found, _ := GetIgnoreConfiguredEndpoints(ctx, configs); found && val { - return "", false, nil - } - - for _, cs := range configs { - if p, ok := cs.(ServiceBaseEndpointProvider); ok { - value, found, err = p.GetServiceBaseEndpoint(context.Background(), sdkID) - if err != nil || found { - break - } - } - } - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go deleted file mode 100644 index d25782e9ce..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package configsources - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go deleted file mode 100644 index e6223dd3b3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go +++ /dev/null @@ -1,94 +0,0 @@ -package awsrulesfn - -import ( - "strings" -) - -// ARN provides AWS ARN components broken out into a data structure. -type ARN struct { - Partition string - Service string - Region string - AccountId string - ResourceId OptionalStringSlice -} - -const ( - arnDelimiters = ":" - resourceDelimiters = "/:" - arnSections = 6 - arnPrefix = "arn:" - - // zero-indexed - sectionPartition = 1 - sectionService = 2 - sectionRegion = 3 - sectionAccountID = 4 - sectionResource = 5 -) - -// ParseARN returns an [ARN] value parsed from the input string provided. If -// the ARN cannot be parsed nil will be returned, and error added to -// [ErrorCollector]. -func ParseARN(input string) *ARN { - if !strings.HasPrefix(input, arnPrefix) { - return nil - } - - sections := strings.SplitN(input, arnDelimiters, arnSections) - if numSections := len(sections); numSections != arnSections { - return nil - } - - if sections[sectionPartition] == "" { - return nil - } - if sections[sectionService] == "" { - return nil - } - if sections[sectionResource] == "" { - return nil - } - - return &ARN{ - Partition: sections[sectionPartition], - Service: sections[sectionService], - Region: sections[sectionRegion], - AccountId: sections[sectionAccountID], - ResourceId: splitResource(sections[sectionResource]), - } -} - -// splitResource splits the resource components by the ARN resource delimiters. 
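ParseARN is purely syntactic: a fixed prefix, exactly six colon-separated sections, and non-empty partition, service, and resource. A hypothetical in-package sketch of the resulting shape (the ARN is invented; Get is defined just after the next hunk):

package awsrulesfn

import "testing"

func TestParseARNSketch(t *testing.T) {
	// Invented example ARN with a "/"-delimited resource.
	arn := ParseARN("arn:aws:s3:us-east-1:123456789012:accesspoint/myendpoint")
	if arn == nil {
		t.Fatal("expected a parsed ARN")
	}
	if arn.Partition != "aws" || arn.Service != "s3" || arn.AccountId != "123456789012" {
		t.Error("unexpected ARN sections")
	}
	// The resource was split on "/" and ":" into ["accesspoint", "myendpoint"];
	// Get bounds-checks the index and returns nil when out of range.
	if v := arn.ResourceId.Get(1); v == nil || *v != "myendpoint" {
		t.Error("unexpected resource id")
	}
}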
-func splitResource(v string) []string { - var parts []string - var offset int - - for offset <= len(v) { - idx := strings.IndexAny(v[offset:], "/:") - if idx < 0 { - parts = append(parts, v[offset:]) - break - } - parts = append(parts, v[offset:idx+offset]) - offset += idx + 1 - } - - return parts -} - -// OptionalStringSlice provides a helper to safely get the index of a string -// slice that may be out of bounds. Returns pointer to string if index is -// valid. Otherwise returns nil. -type OptionalStringSlice []string - -// Get returns a string pointer of the string at index i if the index is valid. -// Otherwise returns nil. -func (s OptionalStringSlice) Get(i int) *string { - if i < 0 || i >= len(s) { - return nil - } - - v := s[i] - return &v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go deleted file mode 100644 index d5a365853f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package awsrulesfn provides AWS focused endpoint rule functions for -// evaluating endpoint resolution rules. -package awsrulesfn diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go deleted file mode 100644 index df72da97ce..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build codegen -// +build codegen - -package awsrulesfn - -//go:generate go run -tags codegen ./internal/partition/codegen.go -model partitions.json -output partitions.go -//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go deleted file mode 100644 index 637e5fc18e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go +++ /dev/null @@ -1,51 +0,0 @@ -package awsrulesfn - -import ( - "net" - "strings" - - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// IsVirtualHostableS3Bucket returns if the input is a DNS compatible bucket -// name and can be used with Amazon S3 virtual hosted style addressing. Similar -// to [rulesfn.IsValidHostLabel] with the added restriction that the length of label -// must be [3:63] characters long, all lowercase, and not formatted as an IP -// address. 
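Per the doc comment above, the check layers three rules on top of plain host-label validation: the input must not be an IP address, each label must be 3-63 characters, and uppercase is rejected. A hypothetical in-package table test (the function body follows in the next hunk):

package awsrulesfn

import "testing"

func TestVirtualHostableSketch(t *testing.T) {
	cases := map[string]bool{
		"my-bucket":   true,  // DNS-compatible, lowercase, 3-63 chars
		"My_Bucket":   false, // uppercase (and underscores) are rejected
		"192.168.0.1": false, // IP addresses are rejected outright
		"ab":          false, // shorter than 3 characters
	}
	for input, want := range cases {
		if got := IsVirtualHostableS3Bucket(input, false); got != want {
			t.Errorf("IsVirtualHostableS3Bucket(%q) = %v, want %v", input, got, want)
		}
	}
}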
-func IsVirtualHostableS3Bucket(input string, allowSubDomains bool) bool { - // input should not be formatted as an IP address - // NOTE: this will technically trip up on IPv6 hosts with zone IDs, but - // validation further down will catch that anyway (it's guaranteed to have - // unfriendly characters % and : if that's the case) - if net.ParseIP(input) != nil { - return false - } - - var labels []string - if allowSubDomains { - labels = strings.Split(input, ".") - } else { - labels = []string{input} - } - - for _, label := range labels { - // validate special length constraints - if l := len(label); l < 3 || l > 63 { - return false - } - - // Validate no capital letters - for _, r := range label { - if r >= 'A' && r <= 'Z' { - return false - } - } - - // Validate valid host label - if !smithyhttp.ValidHostLabel(label) { - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go deleted file mode 100644 index ba6032758a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go +++ /dev/null @@ -1,75 +0,0 @@ -package awsrulesfn - -import "regexp" - -// Partition provides the metadata describing an AWS partition. -type Partition struct { - ID string `json:"id"` - Regions map[string]RegionOverrides `json:"regions"` - RegionRegex string `json:"regionRegex"` - DefaultConfig PartitionConfig `json:"outputs"` -} - -// PartitionConfig provides the endpoint metadata for an AWS region or partition. -type PartitionConfig struct { - Name string `json:"name"` - DnsSuffix string `json:"dnsSuffix"` - DualStackDnsSuffix string `json:"dualStackDnsSuffix"` - SupportsFIPS bool `json:"supportsFIPS"` - SupportsDualStack bool `json:"supportsDualStack"` -} - -type RegionOverrides struct { - Name *string `json:"name"` - DnsSuffix *string `json:"dnsSuffix"` - DualStackDnsSuffix *string `json:"dualStackDnsSuffix"` - SupportsFIPS *bool `json:"supportsFIPS"` - SupportsDualStack *bool `json:"supportsDualStack"` -} - -const defaultPartition = "aws" - -func getPartition(partitions []Partition, region string) *PartitionConfig { - for _, partition := range partitions { - if v, ok := partition.Regions[region]; ok { - p := mergeOverrides(partition.DefaultConfig, v) - return &p - } - } - - for _, partition := range partitions { - regionRegex := regexp.MustCompile(partition.RegionRegex) - if regionRegex.MatchString(region) { - v := partition.DefaultConfig - return &v - } - } - - for _, partition := range partitions { - if partition.ID == defaultPartition { - v := partition.DefaultConfig - return &v - } - } - - return nil -} - -func mergeOverrides(into PartitionConfig, from RegionOverrides) PartitionConfig { - if from.Name != nil { - into.Name = *from.Name - } - if from.DnsSuffix != nil { - into.DnsSuffix = *from.DnsSuffix - } - if from.DualStackDnsSuffix != nil { - into.DualStackDnsSuffix = *from.DualStackDnsSuffix - } - if from.SupportsFIPS != nil { - into.SupportsFIPS = *from.SupportsFIPS - } - if from.SupportsDualStack != nil { - into.SupportsDualStack = *from.SupportsDualStack - } - return into -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go deleted file mode 100644 index 849beffd7d..0000000000 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ /dev/null @@ -1,381 +0,0 @@ -// Code generated by endpoint/awsrulesfn/internal/partition. DO NOT EDIT. - -package awsrulesfn - -// GetPartition returns an AWS [Partition] for the region provided. If the -// partition cannot be determined nil will be returned. -func GetPartition(region string) *PartitionConfig { - return getPartition(partitions, region) -} - -var partitions = []Partition{ - { - ID: "aws", - RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, - }, - Regions: map[string]RegionOverrides{ - "af-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-northeast-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-northeast-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-northeast-3": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-south-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-3": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-4": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "aws-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ca-central-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-central-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-central-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-north-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-south-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-west-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-west-3": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "il-central-1": { - Name: nil, - DnsSuffix: nil, - 
DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "me-central-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "me-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "sa-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-east-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-west-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-cn", - RegionRegex: "^cn\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-cn", - DnsSuffix: "amazonaws.com.cn", - DualStackDnsSuffix: "api.amazonwebservices.com.cn", - SupportsFIPS: true, - SupportsDualStack: true, - }, - Regions: map[string]RegionOverrides{ - "aws-cn-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "cn-north-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "cn-northwest-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-us-gov", - RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, - }, - Regions: map[string]RegionOverrides{ - "aws-us-gov-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-iso", - RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-iso", - DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "c2s.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, - }, - Regions: map[string]RegionOverrides{ - "aws-iso-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-iso-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-iso-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-iso-b", - RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-iso-b", - DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "sc2s.sgov.gov", - SupportsFIPS: true, - SupportsDualStack: false, - }, - Regions: map[string]RegionOverrides{ - "aws-iso-b-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-isob-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, 
- SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-iso-e", - RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-iso-e", - DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "cloud.adc-e.uk", - SupportsFIPS: true, - SupportsDualStack: false, - }, - Regions: map[string]RegionOverrides{}, - }, - { - ID: "aws-iso-f", - RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-iso-f", - DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "csp.hci.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, - }, - Regions: map[string]RegionOverrides{}, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json deleted file mode 100644 index f376f6908a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ /dev/null @@ -1,216 +0,0 @@ -{ - "partitions" : [ { - "id" : "aws", - "outputs" : { - "dnsSuffix" : "amazonaws.com", - "dualStackDnsSuffix" : "api.aws", - "implicitGlobalRegion" : "us-east-1", - "name" : "aws", - "supportsDualStack" : true, - "supportsFIPS" : true - }, - "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", - "regions" : { - "af-south-1" : { - "description" : "Africa (Cape Town)" - }, - "ap-east-1" : { - "description" : "Asia Pacific (Hong Kong)" - }, - "ap-northeast-1" : { - "description" : "Asia Pacific (Tokyo)" - }, - "ap-northeast-2" : { - "description" : "Asia Pacific (Seoul)" - }, - "ap-northeast-3" : { - "description" : "Asia Pacific (Osaka)" - }, - "ap-south-1" : { - "description" : "Asia Pacific (Mumbai)" - }, - "ap-south-2" : { - "description" : "Asia Pacific (Hyderabad)" - }, - "ap-southeast-1" : { - "description" : "Asia Pacific (Singapore)" - }, - "ap-southeast-2" : { - "description" : "Asia Pacific (Sydney)" - }, - "ap-southeast-3" : { - "description" : "Asia Pacific (Jakarta)" - }, - "ap-southeast-4" : { - "description" : "Asia Pacific (Melbourne)" - }, - "aws-global" : { - "description" : "AWS Standard global region" - }, - "ca-central-1" : { - "description" : "Canada (Central)" - }, - "ca-west-1" : { - "description" : "Canada West (Calgary)" - }, - "eu-central-1" : { - "description" : "Europe (Frankfurt)" - }, - "eu-central-2" : { - "description" : "Europe (Zurich)" - }, - "eu-north-1" : { - "description" : "Europe (Stockholm)" - }, - "eu-south-1" : { - "description" : "Europe (Milan)" - }, - "eu-south-2" : { - "description" : "Europe (Spain)" - }, - "eu-west-1" : { - "description" : "Europe (Ireland)" - }, - "eu-west-2" : { - "description" : "Europe (London)" - }, - "eu-west-3" : { - "description" : "Europe (Paris)" - }, - "il-central-1" : { - "description" : "Israel (Tel Aviv)" - }, - "me-central-1" : { - "description" : "Middle East (UAE)" - }, - "me-south-1" : { - "description" : "Middle East (Bahrain)" - }, - "sa-east-1" : { - "description" : "South America (Sao Paulo)" - }, - "us-east-1" : { - "description" : "US East (N. Virginia)" - }, - "us-east-2" : { - "description" : "US East (Ohio)" - }, - "us-west-1" : { - "description" : "US West (N. 
California)" - }, - "us-west-2" : { - "description" : "US West (Oregon)" - } - } - }, { - "id" : "aws-cn", - "outputs" : { - "dnsSuffix" : "amazonaws.com.cn", - "dualStackDnsSuffix" : "api.amazonwebservices.com.cn", - "implicitGlobalRegion" : "cn-northwest-1", - "name" : "aws-cn", - "supportsDualStack" : true, - "supportsFIPS" : true - }, - "regionRegex" : "^cn\\-\\w+\\-\\d+$", - "regions" : { - "aws-cn-global" : { - "description" : "AWS China global region" - }, - "cn-north-1" : { - "description" : "China (Beijing)" - }, - "cn-northwest-1" : { - "description" : "China (Ningxia)" - } - } - }, { - "id" : "aws-us-gov", - "outputs" : { - "dnsSuffix" : "amazonaws.com", - "dualStackDnsSuffix" : "api.aws", - "implicitGlobalRegion" : "us-gov-west-1", - "name" : "aws-us-gov", - "supportsDualStack" : true, - "supportsFIPS" : true - }, - "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", - "regions" : { - "aws-us-gov-global" : { - "description" : "AWS GovCloud (US) global region" - }, - "us-gov-east-1" : { - "description" : "AWS GovCloud (US-East)" - }, - "us-gov-west-1" : { - "description" : "AWS GovCloud (US-West)" - } - } - }, { - "id" : "aws-iso", - "outputs" : { - "dnsSuffix" : "c2s.ic.gov", - "dualStackDnsSuffix" : "c2s.ic.gov", - "implicitGlobalRegion" : "us-iso-east-1", - "name" : "aws-iso", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", - "regions" : { - "aws-iso-global" : { - "description" : "AWS ISO (US) global region" - }, - "us-iso-east-1" : { - "description" : "US ISO East" - }, - "us-iso-west-1" : { - "description" : "US ISO WEST" - } - } - }, { - "id" : "aws-iso-b", - "outputs" : { - "dnsSuffix" : "sc2s.sgov.gov", - "dualStackDnsSuffix" : "sc2s.sgov.gov", - "implicitGlobalRegion" : "us-isob-east-1", - "name" : "aws-iso-b", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", - "regions" : { - "aws-iso-b-global" : { - "description" : "AWS ISOB (US) global region" - }, - "us-isob-east-1" : { - "description" : "US ISOB East (Ohio)" - } - } - }, { - "id" : "aws-iso-e", - "outputs" : { - "dnsSuffix" : "cloud.adc-e.uk", - "dualStackDnsSuffix" : "cloud.adc-e.uk", - "implicitGlobalRegion" : "eu-isoe-west-1", - "name" : "aws-iso-e", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", - "regions" : { } - }, { - "id" : "aws-iso-f", - "outputs" : { - "dnsSuffix" : "csp.hci.ic.gov", - "dualStackDnsSuffix" : "csp.hci.ic.gov", - "implicitGlobalRegion" : "us-isof-south-1", - "name" : "aws-iso-f", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", - "regions" : { } - } ], - "version" : "1.1" -} \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go deleted file mode 100644 index 67950ca366..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go +++ /dev/null @@ -1,201 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4"} -) - -// Options provide configuration needed to direct how endpoints are resolved. 
-type Options struct { - // Disable usage of HTTPS (TLS / SSL) - DisableHTTPS bool -} - -// Partitions is a slice of partition -type Partitions []Partition - -// ResolveEndpoint resolves a service endpoint for the given region and options. -func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) { - if len(ps) == 0 { - return aws.Endpoint{}, fmt.Errorf("no partitions found") - } - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(region) { - continue - } - - return ps[i].ResolveEndpoint(region, opts) - } - - // fallback to first partition format to use when resolving the endpoint. - return ps[0].ResolveEndpoint(region, opts) -} - -// Partition is an AWS partition description for a service and its' region endpoints. -type Partition struct { - ID string - RegionRegex *regexp.Regexp - PartitionEndpoint string - IsRegionalized bool - Defaults Endpoint - Endpoints Endpoints -} - -func (p Partition) canResolveEndpoint(region string) bool { - _, ok := p.Endpoints[region] - return ok || p.RegionRegex.MatchString(region) -} - -// ResolveEndpoint resolves and service endpoint for the given region and options. -func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { - if len(region) == 0 && len(p.PartitionEndpoint) != 0 { - region = p.PartitionEndpoint - } - - e, _ := p.endpointForRegion(region) - - return e.resolve(p.ID, region, p.Defaults, options), nil -} - -func (p Partition) endpointForRegion(region string) (Endpoint, bool) { - if e, ok := p.Endpoints[region]; ok { - return e, true - } - - if !p.IsRegionalized { - return p.Endpoints[p.PartitionEndpoint], region == p.PartitionEndpoint - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. - return Endpoint{}, false -} - -// Endpoints is a map of service config regions to endpoints -type Endpoints map[string]Endpoint - -// CredentialScope is the credential scope of a region and service -type CredentialScope struct { - Region string - Service string -} - -// Endpoint is a service endpoint description -type Endpoint struct { - // True if the endpoint cannot be resolved for this partition/region/service - Unresolveable aws.Ternary - - Hostname string - Protocols []string - - CredentialScope CredentialScope - - SignatureVersions []string `json:"signatureVersions"` -} - -func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) aws.Endpoint { - var merged Endpoint - merged.mergeIn(def) - merged.mergeIn(e) - e = merged - - var u string - if e.Unresolveable != aws.TrueTernary { - // Only attempt to resolve the endpoint if it can be resolved. 
- hostname := strings.Replace(e.Hostname, "{region}", region, 1) - - scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) - u = scheme + "://" + hostname - } - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - signingName := e.CredentialScope.Service - - return aws.Endpoint{ - URL: u, - PartitionID: partition, - SigningRegion: signingRegion, - SigningName: signingName, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } -} - -func (e *Endpoint) mergeIn(other Endpoint) { - if other.Unresolveable != aws.UnknownTernary { - e.Unresolveable = other.Unresolveable - } - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } -} - -func getEndpointScheme(protocols []string, disableHTTPS bool) string { - if disableHTTPS { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} - -// MapFIPSRegion extracts the intrinsic AWS region from one that may have an -// embedded FIPS microformat. -func MapFIPSRegion(region string) string { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(region, fipsInfix) || - strings.Contains(region, fipsPrefix) || - strings.Contains(region, fipsSuffix) { - region = strings.ReplaceAll(region, fipsInfix, "-") - region = strings.ReplaceAll(region, fipsPrefix, "") - region = strings.ReplaceAll(region, fipsSuffix, "") - } - - return region -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md deleted file mode 100644 index 5bb02f574f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ /dev/null @@ -1,250 +0,0 @@ -# v2.6.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.3 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.37 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.36 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.35 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.34 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.33 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.32 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.31 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.30 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.29 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.28 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.27 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.26 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.25 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.24 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.23 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.22 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.21 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.20 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.19 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.18 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.17 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.16 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.15 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# 
v2.4.14 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.3.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.2.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.1.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.0.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.0.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.0.0 (2021-11-06) - -* **Release**: Endpoint Variant Model Support -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
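Reviewer note on the endpoint resolvers being dropped from the vendor tree in this change (the legacy `internal/endpoints` package above and the `internal/endpoints/v2` package below): both template `{region}` into a hostname and pick a scheme by protocol priority, and the legacy package additionally normalizes FIPS pseudo-regions via `MapFIPSRegion`. Below is a minimal standalone sketch of that normalization, lifted from the deleted code; the `main` driver and the sample region names are illustrative only.

```go
package main

import (
	"fmt"
	"strings"
)

// mapFIPSRegion mirrors MapFIPSRegion from the deleted
// internal/endpoints package: it strips an embedded FIPS microformat
// (prefix, infix, or suffix) to recover the intrinsic AWS region.
func mapFIPSRegion(region string) string {
	const (
		fipsInfix  = "-fips-"
		fipsPrefix = "fips-"
		fipsSuffix = "-fips"
	)
	if strings.Contains(region, fipsInfix) ||
		strings.Contains(region, fipsPrefix) ||
		strings.Contains(region, fipsSuffix) {
		// The infix form is replaced with a single hyphen first, so a
		// mid-name "-fips-" does not leave a double hyphen behind.
		region = strings.ReplaceAll(region, fipsInfix, "-")
		region = strings.ReplaceAll(region, fipsPrefix, "")
		region = strings.ReplaceAll(region, fipsSuffix, "")
	}
	return region
}

func main() {
	for _, r := range []string{"fips-us-gov-west-1", "us-east-1-fips", "eu-west-2"} {
		fmt.Printf("%s -> %s\n", r, mapFIPSRegion(r))
	}
}
```

A region with no FIPS marker passes through unchanged, which is why the resolver can apply this unconditionally before partition matching.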
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go deleted file mode 100644 index 32251a7e3c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go +++ /dev/null @@ -1,302 +0,0 @@ -package endpoints - -import ( - "fmt" - "github.com/aws/smithy-go/logging" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// DefaultKey is a compound map key of a variant and other values. -type DefaultKey struct { - Variant EndpointVariant - ServiceVariant ServiceVariant -} - -// EndpointKey is a compound map key of a region and associated variant value. -type EndpointKey struct { - Region string - Variant EndpointVariant - ServiceVariant ServiceVariant -} - -// EndpointVariant is a bit field to describe the endpoints attributes. -type EndpointVariant uint64 - -const ( - // FIPSVariant indicates that the endpoint is FIPS capable. - FIPSVariant EndpointVariant = 1 << (64 - 1 - iota) - - // DualStackVariant indicates that the endpoint is DualStack capable. - DualStackVariant -) - -// ServiceVariant is a bit field to describe the service endpoint attributes. -type ServiceVariant uint64 - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4", "s3v4"} -) - -// Options provide configuration needed to direct how endpoints are resolved. -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the provided logger. - LogDeprecated bool - - // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority - // over the region name passed to the ResolveEndpoint call. - ResolvedRegion string - - // Disable usage of HTTPS (TLS / SSL) - DisableHTTPS bool - - // Instruct the resolver to use a service endpoint that supports dual-stack. - // If a service does not have a dual-stack endpoint an error will be returned by the resolver. - UseDualStackEndpoint aws.DualStackEndpointState - - // Instruct the resolver to use a service endpoint that supports FIPS. - // If a service does not have a FIPS endpoint an error will be returned by the resolver. - UseFIPSEndpoint aws.FIPSEndpointState - - // ServiceVariant is a bitfield of service specified endpoint variant data. - ServiceVariant ServiceVariant -} - -// GetEndpointVariant returns the EndpointVariant for the variant associated options. -func (o Options) GetEndpointVariant() (v EndpointVariant) { - if o.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { - v |= DualStackVariant - } - if o.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - v |= FIPSVariant - } - return v -} - -// Partitions is a slice of partition -type Partitions []Partition - -// ResolveEndpoint resolves a service endpoint for the given region and options. 
-func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) { - if len(ps) == 0 { - return aws.Endpoint{}, fmt.Errorf("no partitions found") - } - - if opts.Logger == nil { - opts.Logger = logging.Nop{} - } - - if len(opts.ResolvedRegion) > 0 { - region = opts.ResolvedRegion - } - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(region, opts) { - continue - } - - return ps[i].ResolveEndpoint(region, opts) - } - - // fallback to first partition format to use when resolving the endpoint. - return ps[0].ResolveEndpoint(region, opts) -} - -// Partition is an AWS partition description for a service and its' region endpoints. -type Partition struct { - ID string - RegionRegex *regexp.Regexp - PartitionEndpoint string - IsRegionalized bool - Defaults map[DefaultKey]Endpoint - Endpoints Endpoints -} - -func (p Partition) canResolveEndpoint(region string, opts Options) bool { - _, ok := p.Endpoints[EndpointKey{ - Region: region, - Variant: opts.GetEndpointVariant(), - }] - return ok || p.RegionRegex.MatchString(region) -} - -// ResolveEndpoint resolves and service endpoint for the given region and options. -func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { - if len(region) == 0 && len(p.PartitionEndpoint) != 0 { - region = p.PartitionEndpoint - } - - endpoints := p.Endpoints - - variant := options.GetEndpointVariant() - serviceVariant := options.ServiceVariant - - defaults := p.Defaults[DefaultKey{ - Variant: variant, - ServiceVariant: serviceVariant, - }] - - return p.endpointForRegion(region, variant, serviceVariant, endpoints).resolve(p.ID, region, defaults, options) -} - -func (p Partition) endpointForRegion(region string, variant EndpointVariant, serviceVariant ServiceVariant, endpoints Endpoints) Endpoint { - key := EndpointKey{ - Region: region, - Variant: variant, - } - - if e, ok := endpoints[key]; ok { - return e - } - - if !p.IsRegionalized { - return endpoints[EndpointKey{ - Region: p.PartitionEndpoint, - Variant: variant, - ServiceVariant: serviceVariant, - }] - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. - return Endpoint{} -} - -// Endpoints is a map of service config regions to endpoints -type Endpoints map[EndpointKey]Endpoint - -// CredentialScope is the credential scope of a region and service -type CredentialScope struct { - Region string - Service string -} - -// Endpoint is a service endpoint description -type Endpoint struct { - // True if the endpoint cannot be resolved for this partition/region/service - Unresolveable aws.Ternary - - Hostname string - Protocols []string - - CredentialScope CredentialScope - - SignatureVersions []string - - // Indicates that this endpoint is deprecated. - Deprecated aws.Ternary -} - -// IsZero returns whether the endpoint structure is an empty (zero) value. 
-func (e Endpoint) IsZero() bool { - switch { - case e.Unresolveable != aws.UnknownTernary: - return false - case len(e.Hostname) != 0: - return false - case len(e.Protocols) != 0: - return false - case e.CredentialScope != (CredentialScope{}): - return false - case len(e.SignatureVersions) != 0: - return false - } - return true -} - -func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) (aws.Endpoint, error) { - var merged Endpoint - merged.mergeIn(def) - merged.mergeIn(e) - e = merged - - if e.IsZero() { - return aws.Endpoint{}, fmt.Errorf("unable to resolve endpoint for region: %v", region) - } - - var u string - if e.Unresolveable != aws.TrueTernary { - // Only attempt to resolve the endpoint if it can be resolved. - hostname := strings.Replace(e.Hostname, "{region}", region, 1) - - scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) - u = scheme + "://" + hostname - } - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - signingName := e.CredentialScope.Service - - if e.Deprecated == aws.TrueTernary && options.LogDeprecated { - options.Logger.Logf(logging.Warn, "endpoint identifier %q, url %q marked as deprecated", region, u) - } - - return aws.Endpoint{ - URL: u, - PartitionID: partition, - SigningRegion: signingRegion, - SigningName: signingName, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - }, nil -} - -func (e *Endpoint) mergeIn(other Endpoint) { - if other.Unresolveable != aws.UnknownTernary { - e.Unresolveable = other.Unresolveable - } - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } - if other.Deprecated != aws.UnknownTernary { - e.Deprecated = other.Deprecated - } -} - -func getEndpointScheme(protocols []string, disableHTTPS bool) string { - if disableHTTPS { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go deleted file mode 100644 index bb857bcb97..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package endpoints - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "2.6.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md deleted file mode 100644 index c0e54faff2..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ /dev/null @@ -1,271 +0,0 @@ -# v1.8.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
- -# v1.7.3 (2024-01-22) - -* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. - -# v1.7.2 (2023-12-08) - -* **Bug Fix**: Correct loading of [services *] sections into shared config. - -# v1.7.1 (2023-11-16) - -* **Bug Fix**: Fix recognition of trailing comments in shared config properties. # or ; separators that aren't preceded by whitespace at the end of a property value should be considered part of it. - -# v1.7.0 (2023-11-13) - -* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. - -# v1.6.0 (2023-11-09.2) - -* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored - -# v1.5.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2023-11-07) - -* **Bug Fix**: Fix subproperty performance regression - -# v1.5.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.45 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.44 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.43 (2023-09-22) - -* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. -* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. 
- -# v1.3.42 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.41 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.40 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.39 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.38 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.37 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.36 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.35 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.34 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.33 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.32 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.31 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.30 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.29 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.28 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.27 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.26 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.25 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.24 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.23 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.22 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.21 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.20 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.19 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.18 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.17 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.16 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.15 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.14 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.13 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.12 (2022-05-17) - -* **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.11 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.10 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.9 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.8 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.7 (2022-03-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.6 (2022-02-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.5 (2022-01-28) - -* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug. - -# v1.3.4 (2022-01-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2022-01-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.5 (2021-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.4 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2021-08-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-08-04) - -* **Feature**: adds error handling for defered close calls -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2021-07-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2021-07-01) - -* **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values. - -# v1.0.1 (2021-06-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.0 (2021-05-20) - -* **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
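A note on `errors.go`, deleted just below: `UnableToReadFile` is a thin wrapper that keeps the underlying I/O error reachable through `Unwrap`, so `errors.Is` and `errors.As` continue to work across the wrapping. Here is a self-contained sketch that reproduces the deleted type to show the chain; the `/nonexistent/credentials` path is a stand-in.

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// UnableToReadFile reproduces the wrapper from the deleted
// internal/ini/errors.go: it carries the underlying error and
// exposes it via Unwrap.
type UnableToReadFile struct {
	Err error
}

func (e *UnableToReadFile) Error() string {
	base := "unable to read file"
	if e.Err == nil {
		return base
	}
	return fmt.Sprintf("%s: %v", base, e.Err)
}

func (e *UnableToReadFile) Unwrap() error { return e.Err }

func main() {
	_, err := os.Open("/nonexistent/credentials") // expected to fail
	wrapped := &UnableToReadFile{Err: err}

	var target *UnableToReadFile
	fmt.Println(errors.As(wrapped, &target))        // true: the wrapper itself
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: found via Unwrap
}
```

Note that `Unwrap` is defined on the pointer receiver, so callers must wrap with `&UnableToReadFile{...}`, exactly as the deleted `OpenFile` does.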
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go deleted file mode 100644 index 0f278d55e6..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go +++ /dev/null @@ -1,22 +0,0 @@ -package ini - -import "fmt" - -// UnableToReadFile is an error indicating that a ini file could not be read -type UnableToReadFile struct { - Err error -} - -// Error returns an error message and the underlying error message if present -func (e *UnableToReadFile) Error() string { - base := "unable to read file" - if e.Err == nil { - return base - } - return fmt.Sprintf("%s: %v", base, e.Err) -} - -// Unwrap returns the underlying error -func (e *UnableToReadFile) Unwrap() error { - return e.Err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go deleted file mode 100644 index 6e0b906c34..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package ini - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.8.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go deleted file mode 100644 index cefcce91e7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go +++ /dev/null @@ -1,56 +0,0 @@ -// Package ini implements parsing of the AWS shared config file. -// -// Example: -// sections, err := ini.OpenFile("/path/to/file") -// if err != nil { -// panic(err) -// } -// -// profile := "foo" -// section, ok := sections.GetSection(profile) -// if !ok { -// fmt.Printf("section %q could not be found", profile) -// } -package ini - -import ( - "fmt" - "io" - "os" - "strings" -) - -// OpenFile parses shared config from the given file path. -func OpenFile(path string) (sections Sections, err error) { - f, oerr := os.Open(path) - if oerr != nil { - return Sections{}, &UnableToReadFile{Err: oerr} - } - - defer func() { - closeErr := f.Close() - if err == nil { - err = closeErr - } else if closeErr != nil { - err = fmt.Errorf("close error: %v, original error: %w", closeErr, err) - } - }() - - return Parse(f, path) -} - -// Parse parses shared config from the given reader. 
-func Parse(r io.Reader, path string) (Sections, error) { - contents, err := io.ReadAll(r) - if err != nil { - return Sections{}, fmt.Errorf("read all: %v", err) - } - - lines := strings.Split(string(contents), "\n") - tokens, err := tokenize(lines) - if err != nil { - return Sections{}, fmt.Errorf("tokenize: %v", err) - } - - return parse(tokens, path), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go deleted file mode 100644 index 2422d90461..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go +++ /dev/null @@ -1,109 +0,0 @@ -package ini - -import ( - "fmt" - "strings" -) - -func parse(tokens []lineToken, path string) Sections { - parser := &parser{ - path: path, - sections: NewSections(), - } - parser.parse(tokens) - return parser.sections -} - -type parser struct { - csection, ckey string // current state - path string // source file path - sections Sections // parse result -} - -func (p *parser) parse(tokens []lineToken) { - for _, otok := range tokens { - switch tok := otok.(type) { - case *lineTokenProfile: - p.handleProfile(tok) - case *lineTokenProperty: - p.handleProperty(tok) - case *lineTokenSubProperty: - p.handleSubProperty(tok) - case *lineTokenContinuation: - p.handleContinuation(tok) - } - } -} - -func (p *parser) handleProfile(tok *lineTokenProfile) { - name := tok.Name - if tok.Type != "" { - name = fmt.Sprintf("%s %s", tok.Type, tok.Name) - } - p.ckey = "" - p.csection = name - if _, ok := p.sections.container[name]; !ok { - p.sections.container[name] = NewSection(name) - } -} - -func (p *parser) handleProperty(tok *lineTokenProperty) { - if p.csection == "" { - return // LEGACY: don't error on "global" properties - } - - p.ckey = tok.Key - if _, ok := p.sections.container[p.csection].values[tok.Key]; ok { - section := p.sections.container[p.csection] - section.Logs = append(p.sections.container[p.csection].Logs, - fmt.Sprintf( - "For profile: %v, overriding %v value, with a %v value found in a duplicate profile defined later in the same file %v. \n", - p.csection, tok.Key, tok.Key, p.path, - ), - ) - p.sections.container[p.csection] = section - } - - p.sections.container[p.csection].values[tok.Key] = Value{ - str: tok.Value, - } - p.sections.container[p.csection].SourceFile[tok.Key] = p.path -} - -func (p *parser) handleSubProperty(tok *lineTokenSubProperty) { - if p.csection == "" { - return // LEGACY: don't error on "global" properties - } - - if p.ckey == "" || p.sections.container[p.csection].values[p.ckey].str != "" { - // This is an "orphaned" subproperty, either because it's at - // the beginning of a section or because the last property's - // value isn't empty. Either way we're lenient here and - // "promote" this to a normal property. 
- p.handleProperty(&lineTokenProperty{ - Key: tok.Key, - Value: strings.TrimSpace(trimPropertyComment(tok.Value)), - }) - return - } - - if p.sections.container[p.csection].values[p.ckey].mp == nil { - p.sections.container[p.csection].values[p.ckey] = Value{ - mp: map[string]string{}, - } - } - p.sections.container[p.csection].values[p.ckey].mp[tok.Key] = tok.Value -} - -func (p *parser) handleContinuation(tok *lineTokenContinuation) { - if p.ckey == "" { - return - } - - value, _ := p.sections.container[p.csection].values[p.ckey] - if value.str != "" && value.mp == nil { - value.str = fmt.Sprintf("%s\n%s", value.str, tok.Value) - } - - p.sections.container[p.csection].values[p.ckey] = value -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go deleted file mode 100644 index dd89848e69..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go +++ /dev/null @@ -1,157 +0,0 @@ -package ini - -import ( - "sort" -) - -// Sections is a map of Section structures that represent -// a configuration. -type Sections struct { - container map[string]Section -} - -// NewSections returns empty ini Sections -func NewSections() Sections { - return Sections{ - container: make(map[string]Section, 0), - } -} - -// GetSection will return section p. If section p does not exist, -// false will be returned in the second parameter. -func (t Sections) GetSection(p string) (Section, bool) { - v, ok := t.container[p] - return v, ok -} - -// HasSection denotes whether Sections contains a section with the -// provided name. -func (t Sections) HasSection(p string) bool { - _, ok := t.container[p] - return ok -} - -// SetSection sets a section value for the provided section name. -func (t Sections) SetSection(p string, v Section) Sections { - t.container[p] = v - return t -} - -// DeleteSection deletes a section entry/value for the provided section name. -func (t Sections) DeleteSection(p string) { - delete(t.container, p) -} - -// values represents a map of union values. -type values map[string]Value - -// List will return a list of all sections that were successfully -// parsed. -func (t Sections) List() []string { - keys := make([]string, len(t.container)) - i := 0 - for k := range t.container { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -} - -// Section contains a name and values. This represents -// a sectioned entry in a configuration file. -type Section struct { - // Name is the Section profile name - Name string - - // values are the values within parsed profile - values values - - // Errors is the list of errors - Errors []error - - // Logs is the list of logs - Logs []string - - // SourceFile is the INI Source file from where this section - // was retrieved. The key is the property; the value is the - // source file the property was retrieved from. - SourceFile map[string]string -} - -// NewSection returns an initialized section for the name -func NewSection(name string) Section { - return Section{ - Name: name, - values: values{}, - SourceFile: map[string]string{}, - } -} - -// List will return a list of all -// keys in values -func (t Section) List() []string { - keys := make([]string, len(t.values)) - i := 0 - for k := range t.values { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -}
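Before the remaining Section accessors, a quick illustration may help: the three handlers above each react to one shape of shared-config line. A hedged example input, with invented profile and key names, not text from the diff:

[profile dev]
region = us-west-2
s3 =
  max_concurrent_requests = 20
  max_queue_size = 1000

The bracketed header becomes a lineTokenProfile (Type "profile", Name "dev"); region = us-west-2 becomes a lineTokenProperty stored as a string Value; and because s3 carries an empty value, the two indented assignments arrive as lineTokenSubProperty tokens and are collected into the map Value under s3. An indented line under a non-empty property would instead be a lineTokenContinuation, appended to the string value with a newline, as handleContinuation shows.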
-// UpdateSourceFile updates the source file for a property to the provided filepath. -func (t Section) UpdateSourceFile(property string, filepath string) { - t.SourceFile[property] = filepath -} - -// UpdateValue updates the value for the provided key -func (t Section) UpdateValue(k string, v Value) error { - t.values[k] = v - return nil -} - -// Has will return whether or not an entry exists in a given section -func (t Section) Has(k string) bool { - _, ok := t.values[k] - return ok -} - -// ValueType will return what type the union is set to. If -// k was not found, the NoneType will be returned. -func (t Section) ValueType(k string) (ValueType, bool) { - v, ok := t.values[k] - return v.Type, ok -} - -// Bool returns a bool value at k -func (t Section) Bool(k string) (bool, bool) { - return t.values[k].BoolValue() -} - -// Int returns an integer value at k -func (t Section) Int(k string) (int64, bool) { - return t.values[k].IntValue() -} - -// Map returns a map value at k -func (t Section) Map(k string) map[string]string { - return t.values[k].MapValue() -} - -// Float64 returns a float value at k -func (t Section) Float64(k string) (float64, bool) { - return t.values[k].FloatValue() -} - -// String returns the string value at k -func (t Section) String(k string) string { - _, ok := t.values[k] - if !ok { - return "" - } - return t.values[k].StringValue() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go deleted file mode 100644 index ed77d08351..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go +++ /dev/null @@ -1,89 +0,0 @@ -package ini - -import ( - "strings" -) - -func trimProfileComment(s string) string { - r, _, _ := strings.Cut(s, "#") - r, _, _ = strings.Cut(r, ";") - return r -} - -func trimPropertyComment(s string) string { - r, _, _ := strings.Cut(s, " #") - r, _, _ = strings.Cut(r, " ;") - r, _, _ = strings.Cut(r, "\t#") - r, _, _ = strings.Cut(r, "\t;") - return r -} - -// assumes no surrounding comment -func splitProperty(s string) (string, string, bool) { - equalsi := strings.Index(s, "=") - coloni := strings.Index(s, ":") // LEGACY: also supported for property assignment - sep := "=" - if equalsi == -1 || coloni != -1 && coloni < equalsi { - sep = ":" - } - - k, v, ok := strings.Cut(s, sep) - if !ok { - return "", "", false - } - return strings.TrimSpace(k), strings.TrimSpace(v), true -} - -// assumes no surrounding comment, whitespace, or profile brackets -func splitProfile(s string) (string, string) { - var first int - for i, r := range s { - if isLineSpace(r) { - if first == 0 { - first = i - } - } else { - if first != 0 { - return s[:first], s[i:] - } - } - } - if first == 0 { - return "", s // type component is effectively blank - } - return "", "" -} - -func isLineSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func unquote(s string) string { - if isSingleQuoted(s) || isDoubleQuoted(s) { - return s[1 : len(s)-1] - } - return s -} - -// applies various legacy conversions to property values: -// - remove wrapping single/double quotes -func legacyStrconv(s string) string { - s = unquote(s) - return s -} - -func isSingleQuoted(s string) bool { - return hasAffixes(s, "'", "'") -} - -func isDoubleQuoted(s string) bool { - return hasAffixes(s, `"`, `"`) -} - -func isBracketed(s string) bool { - return hasAffixes(s, "[", "]") -} - -func hasAffixes(s, left, right string) bool { - return strings.HasPrefix(s, left) && strings.HasSuffix(s, right) -} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go deleted file mode 100644 index 6e9a03744e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go +++ /dev/null @@ -1,32 +0,0 @@ -package ini - -type lineToken interface { - isLineToken() -} - -type lineTokenProfile struct { - Type string - Name string -} - -func (*lineTokenProfile) isLineToken() {} - -type lineTokenProperty struct { - Key string - Value string -} - -func (*lineTokenProperty) isLineToken() {} - -type lineTokenContinuation struct { - Value string -} - -func (*lineTokenContinuation) isLineToken() {} - -type lineTokenSubProperty struct { - Key string - Value string -} - -func (*lineTokenSubProperty) isLineToken() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go deleted file mode 100644 index 89a7736841..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go +++ /dev/null @@ -1,92 +0,0 @@ -package ini - -import ( - "strings" -) - -func tokenize(lines []string) ([]lineToken, error) { - tokens := make([]lineToken, 0, len(lines)) - for _, line := range lines { - if len(strings.TrimSpace(line)) == 0 || isLineComment(line) { - continue - } - - if tok := asProfile(line); tok != nil { - tokens = append(tokens, tok) - } else if tok := asProperty(line); tok != nil { - tokens = append(tokens, tok) - } else if tok := asSubProperty(line); tok != nil { - tokens = append(tokens, tok) - } else if tok := asContinuation(line); tok != nil { - tokens = append(tokens, tok) - } // unrecognized tokens are effectively ignored - } - return tokens, nil -} - -func isLineComment(line string) bool { - trimmed := strings.TrimLeft(line, " \t") - return strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, ";") -} - -func asProfile(line string) *lineTokenProfile { // " [ type name ] ; comment" - trimmed := strings.TrimSpace(trimProfileComment(line)) // "[ type name ]" - if !isBracketed(trimmed) { - return nil - } - trimmed = trimmed[1 : len(trimmed)-1] // " type name " (or just " name ") - trimmed = strings.TrimSpace(trimmed) // "type name" / "name" - typ, name := splitProfile(trimmed) - return &lineTokenProfile{ - Type: typ, - Name: name, - } -} - -func asProperty(line string) *lineTokenProperty { - if isLineSpace(rune(line[0])) { - return nil - } - - trimmed := trimPropertyComment(line) - trimmed = strings.TrimRight(trimmed, " \t") - k, v, ok := splitProperty(trimmed) - if !ok { - return nil - } - - return &lineTokenProperty{ - Key: strings.ToLower(k), // LEGACY: normalize key case - Value: legacyStrconv(v), // LEGACY: see func docs - } -} - -func asSubProperty(line string) *lineTokenSubProperty { - if !isLineSpace(rune(line[0])) { - return nil - } - - // comments on sub-properties are included in the value - trimmed := strings.TrimLeft(line, " \t") - k, v, ok := splitProperty(trimmed) - if !ok { - return nil - } - - return &lineTokenSubProperty{ // same LEGACY constraints as in normal property - Key: strings.ToLower(k), - Value: legacyStrconv(v), - } -} - -func asContinuation(line string) *lineTokenContinuation { - if !isLineSpace(rune(line[0])) { - return nil - } - - // includes comments like sub-properties - trimmed := strings.TrimLeft(line, " \t") - return &lineTokenContinuation{ - Value: trimmed, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go deleted file mode 100644 index e3706b3c31..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go +++ /dev/null @@ -1,93 +0,0 @@ -package ini - -import ( - "fmt" - "strconv" - "strings" -) - -// ValueType is an enum that will signify what type -// the Value is -type ValueType int - -func (v ValueType) String() string { - switch v { - case NoneType: - return "NONE" - case StringType: - return "STRING" - } - - return "" -} - -// ValueType enums -const ( - NoneType = ValueType(iota) - StringType - QuotedStringType -) - -// Value is a union container -type Value struct { - Type ValueType - - str string - mp map[string]string -} - -// NewStringValue returns a Value type generated using a string input. -func NewStringValue(str string) (Value, error) { - return Value{str: str}, nil -} - -func (v Value) String() string { - switch v.Type { - case StringType: - return fmt.Sprintf("string: %s", string(v.str)) - case QuotedStringType: - return fmt.Sprintf("quoted string: %s", string(v.str)) - default: - return "union not set" - } -} - -// MapValue returns a map value for sub properties -func (v Value) MapValue() map[string]string { - return v.mp -} - -// IntValue returns an integer value -func (v Value) IntValue() (int64, bool) { - i, err := strconv.ParseInt(string(v.str), 0, 64) - if err != nil { - return 0, false - } - return i, true -} - -// FloatValue returns a float value -func (v Value) FloatValue() (float64, bool) { - f, err := strconv.ParseFloat(string(v.str), 64) - if err != nil { - return 0, false - } - return f, true -} - -// BoolValue returns a bool value -func (v Value) BoolValue() (bool, bool) { - // we don't use ParseBool as it recognizes more than what we've - // historically supported - if strings.EqualFold(v.str, "true") { - return true, true - } else if strings.EqualFold(v.str, "false") { - return false, true - } - return false, false -} - -// StringValue returns the string value -func (v Value) StringValue() string { - return v.str -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go deleted file mode 100644 index c8484dcd75..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go +++ /dev/null @@ -1,33 +0,0 @@ -package rand - -import ( - "crypto/rand" - "fmt" - "io" - "math/big" -) - -func init() { - Reader = rand.Reader -} - -// Reader provides a random reader that can reset during testing. -var Reader io.Reader - -var floatMaxBigInt = big.NewInt(1 << 53) - -// Float64 returns a float64 read from an io.Reader source. The returned float will be between [0.0, 1.0). -func Float64(reader io.Reader) (float64, error) { - bi, err := rand.Int(reader, floatMaxBigInt) - if err != nil { - return 0, fmt.Errorf("failed to read random value, %v", err) - } - - return float64(bi.Int64()) / (1 << 53), nil -} - -// CryptoRandFloat64 returns a random float64 obtained from the crypto rand -// source. 
-func CryptoRandFloat64() (float64, error) { - return Float64(Reader) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go deleted file mode 100644 index 2b42cbe642..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go +++ /dev/null @@ -1,9 +0,0 @@ -package sdk - -// Invalidator provides access to a type's invalidate method to make it -// invalidate its cache. -// -// e.g. aws.SafeCredentialsProvider's Invalidate method. -type Invalidator interface { - Invalidate() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go deleted file mode 100644 index 8e8dabad54..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go +++ /dev/null @@ -1,74 +0,0 @@ -package sdk - -import ( - "context" - "time" -) - -func init() { - NowTime = time.Now - Sleep = time.Sleep - SleepWithContext = sleepWithContext -} - -// NowTime is a value for getting the current time. This value can be overridden -// for testing by mocking out the current time. -var NowTime func() time.Time - -// Sleep is a value for sleeping for a duration. This value can be overridden -// for testing and mocking out sleep duration. -var Sleep func(time.Duration) - -// SleepWithContext will wait for the timer duration to expire, or for the context -// to be canceled, whichever happens first. If the context is canceled, the Context's -// error will be returned. -// -// This value can be overridden for testing and mocking out sleep duration. -var SleepWithContext func(context.Context, time.Duration) error - -// sleepWithContext will wait for the timer duration to expire, or for the context -// to be canceled, whichever happens first. If the context is canceled, the -// Context's error will be returned. -func sleepWithContext(ctx context.Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} - -// noOpSleepWithContext does nothing, returns immediately. -func noOpSleepWithContext(context.Context, time.Duration) error { - return nil -} - -func noOpSleep(time.Duration) {} - -// TestingUseNopSleep is a utility for disabling sleep across the SDK for -// testing. -func TestingUseNopSleep() func() { - SleepWithContext = noOpSleepWithContext - Sleep = noOpSleep - - return func() { - SleepWithContext = sleepWithContext - Sleep = time.Sleep - } -} - -// TestingUseReferenceTime is a utility for swapping the time function across the SDK to return a specific reference time -// for testing purposes.
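The select-based sleepWithContext above reads most clearly next to a caller. A small usage sketch, under the same internal-package caveat as before (the durations are arbitrary):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/internal/sdk" // internal: importable only inside the SDK
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// The context's 50ms deadline fires before the 1s timer, so the
	// select in sleepWithContext returns ctx.Err() (context.DeadlineExceeded).
	if err := sdk.SleepWithContext(ctx, time.Second); err != nil {
		fmt.Println("sleep interrupted:", err)
	}
}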
-func TestingUseReferenceTime(referenceTime time.Time) func() { - NowTime = func() time.Time { - return referenceTime - } - return func() { - NowTime = time.Now - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go deleted file mode 100644 index 6c443988bb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go +++ /dev/null @@ -1,12 +0,0 @@ -package sdkio - -const ( - // Byte is 8 bits - Byte int64 = 1 - // KibiByte (KiB) is 1024 Bytes - KibiByte = Byte * 1024 - // MebiByte (MiB) is 1024 KiB - MebiByte = KibiByte * 1024 - // GibiByte (GiB) is 1024 MiB - GibiByte = MebiByte * 1024 -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go deleted file mode 100644 index c96b717e08..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go +++ /dev/null @@ -1,47 +0,0 @@ -package shareddefaults - -import ( - "os" - "os/user" - "path/filepath" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "credentials") -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "config") -} - -// UserHomeDir returns the home directory for the user the process is -// running under. -func UserHomeDir() string { - // Ignore errors since we only care about Windows and *nix. - home, _ := os.UserHomeDir() - - if len(home) > 0 { - return home - } - - currUser, _ := user.Current() - if currUser != nil { - home = currUser.HomeDir - } - - return home -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go deleted file mode 100644 index d008ae27cb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go +++ /dev/null @@ -1,11 +0,0 @@ -package strings - -import ( - "strings" -) - -// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, -// under Unicode case-folding. -func HasPrefixFold(s, prefix string) bool { - return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE deleted file mode 100644 index fe6a62006a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go deleted file mode 100644 index cb70616e80..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package singleflight provides a duplicate function call suppression -// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight -// package. The package is forked because it is part of the unstable -// and unversioned golang.org/x/sync module. -// -// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight -package singleflight diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go deleted file mode 100644 index e8a1b17d56..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package singleflight - -import ( - "bytes" - "errors" - "fmt" - "runtime" - "runtime/debug" - "sync" -) - -// errGoexit indicates the runtime.Goexit was called in -// the user given function. -var errGoexit = errors.New("runtime.Goexit was called") - -// A panicError is an arbitrary value recovered from a panic -// with the stack trace during the execution of given function. -type panicError struct { - value interface{} - stack []byte -} - -// Error implements error interface. -func (p *panicError) Error() string { - return fmt.Sprintf("%v\n\n%s", p.value, p.stack) -} - -func newPanicError(v interface{}) error { - stack := debug.Stack() - - // The first line of the stack trace is of the form "goroutine N [status]:" - // but by the time the panic reaches Do the goroutine may no longer exist - // and its status will have changed. Trim out the misleading line.
- if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { - stack = stack[line+1:] - } - return &panicError{value: v, stack: stack} -} - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - - // These fields are written once before the WaitGroup is done - // and are only read after the WaitGroup is done. - val interface{} - err error - - // forgotten indicates whether Forget was called with this call's key - // while the call was still in flight. - forgotten bool - - // These fields are read and written with the singleflight - // mutex held before the WaitGroup is done, and are read but - // not written after the WaitGroup is done. - dups int - chans []chan<- Result -} - -// Group represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Result holds the results of Do, so they can be passed -// on a channel. -type Result struct { - Val interface{} - Err error - Shared bool -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.mu.Unlock() - c.wg.Wait() - - if e, ok := c.err.(*panicError); ok { - panic(e) - } else if c.err == errGoexit { - runtime.Goexit() - } - return c.val, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - g.doCall(c, key, fn) - return c.val, c.err, c.dups > 0 -} - -// DoChan is like Do but returns a channel that will receive the -// results when they are ready. -// -// The returned channel will not be closed. -func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { - ch := make(chan Result, 1) - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - c.chans = append(c.chans, ch) - g.mu.Unlock() - return ch - } - c := &call{chans: []chan<- Result{ch}} - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - go g.doCall(c, key, fn) - - return ch -} - -// doCall handles the single call for a key. -func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { - normalReturn := false - recovered := false - - // use double-defer to distinguish panic from runtime.Goexit, - // more details see https://golang.org/cl/134395 - defer func() { - // the given function invoked runtime.Goexit - if !normalReturn && !recovered { - c.err = errGoexit - } - - c.wg.Done() - g.mu.Lock() - defer g.mu.Unlock() - if !c.forgotten { - delete(g.m, key) - } - - if e, ok := c.err.(*panicError); ok { - // In order to prevent the waiting channels from being blocked forever, - // needs to ensure that this panic cannot be recovered. - if len(c.chans) > 0 { - go panic(e) - select {} // Keep this goroutine around so that it will appear in the crash dump. 
- } else { - panic(e) - } - } else if c.err == errGoexit { - // Already in the process of goexit, no need to call again - } else { - // Normal return - for _, ch := range c.chans { - ch <- Result{c.val, c.err, c.dups > 0} - } - } - }() - - func() { - defer func() { - if !normalReturn { - // Ideally, we would wait to take a stack trace until we've determined - // whether this is a panic or a runtime.Goexit. - // - // Unfortunately, the only way we can distinguish the two is to see - // whether the recover stopped the goroutine from terminating, and by - // the time we know that, the part of the stack trace relevant to the - // panic has been discarded. - if r := recover(); r != nil { - c.err = newPanicError(r) - } - } - }() - - c.val, c.err = fn() - normalReturn = true - }() - - if !normalReturn { - recovered = true - } -} - -// Forget tells the singleflight to forget about a key. Future calls -// to Do for this key will call the function rather than waiting for -// an earlier call to complete. -func (g *Group) Forget(key string) { - g.mu.Lock() - if c, ok := g.m[key]; ok { - c.forgotten = true - } - delete(g.m, key) - g.mu.Unlock() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go deleted file mode 100644 index 5d69db5f24..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go +++ /dev/null @@ -1,13 +0,0 @@ -package timeconv - -import "time" - -// FloatSecondsDur converts a fractional seconds to duration. -func FloatSecondsDur(v float64) time.Duration { - return time.Duration(v * float64(time.Second)) -} - -// DurSecondsFloat converts a duration into fractional seconds. -func DurSecondsFloat(d time.Duration) float64 { - return float64(d) / float64(time.Second) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md deleted file mode 100644 index 53164b65bd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md +++ /dev/null @@ -1,419 +0,0 @@ -# v1.27.3 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.2 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.26.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.1 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.26.0 (2024-02-16) - -* **Feature**: Add new ClientOptions field to waiter config which allows you to extend the config for operation calls made by waiters. 
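Stepping back from the changelog for a moment: the singleflight fork removed above is the piece most worth a usage sketch, since its whole API (Do, DoChan, Forget) is now visible. A hedged example with an invented key and a stand-in workload, under the same internal-import caveat:

package main

import (
	"fmt"
	"sync"

	"github.com/aws/aws-sdk-go-v2/internal/sync/singleflight"
)

func main() {
	var g singleflight.Group // zero value is ready to use; the map is lazily initialized
	var wg sync.WaitGroup

	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Concurrent callers with the same key share one execution of fn;
			// shared reports whether the value was handed to multiple callers.
			v, err, shared := g.Do("credentials", func() (interface{}, error) {
				return "token-abc123", nil // stand-in for an expensive fetch
			})
			fmt.Println(v, err, shared)
		}()
	}
	wg.Wait()
}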
- -# v1.25.1 (2024-02-15) - -* **Bug Fix**: Correct failure to determine the error type in awsJson services that could occur when errors were modeled with a non-string `code` field. - -# v1.25.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.7 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.6 (2023-12-20) - -* No change notes available for this release. - -# v1.24.5 (2023-12-08) - -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. - -# v1.24.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. - -# v1.24.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. -* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.3 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.2 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.23.1 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2023-11-17) - -* **Feature**: Documentation and operational updates for Amazon ECR, adding support for pull through cache rules for upstream registries that require authentication. - -# v1.22.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.2 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.1 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2023-09-11) - -* **Feature**: This release will have ValidationException be thrown from ECR LifecyclePolicy APIs in regions LifecyclePolicy is not supported, this includes existing Amazon Dedicated Cloud (ADC) regions. This release will also change Tag: TagValue and Tag: TagKey to required. 
- -# v1.19.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.1 (2023-08-01) - -* No change notes available for this release. - -# v1.19.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.15 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.14 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.13 (2023-06-15) - -* No change notes available for this release. - -# v1.18.12 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.11 (2023-05-04) - -* No change notes available for this release. - -# v1.18.10 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.9 (2023-04-10) - -* No change notes available for this release. - -# v1.18.8 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.7 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.6 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.5 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. - -# v1.18.4 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2023-02-15) - -* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. -* **Bug Fix**: Correct error type parsing for restJson services. - -# v1.18.2 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.1 (2023-01-23) - -* No change notes available for this release. - -# v1.18.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - -# v1.17.25 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.24 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.23 (2022-11-22) - -* No change notes available for this release. - -# v1.17.22 (2022-11-16) - -* No change notes available for this release. - -# v1.17.21 (2022-11-10) - -* No change notes available for this release. 
- -# v1.17.20 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.19 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.18 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.17 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.16 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.15 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.14 (2022-08-30) - -* No change notes available for this release. - -# v1.17.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2022-03-21) - -* **Feature**: This release includes a fix in the DescribeImageScanFindings paginated output. - -# v1.16.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Feature**: Updated service client model to latest release. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. This also adds a new `Adaptive` retry mode; `Adaptive` is an experimental mode that adds client-side rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2021-12-21) - -* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens.
-* **Feature**: Updated to latest service endpoints - -# v1.11.1 (2021-12-02) - -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2021-11-30) - -* **Feature**: API client updated - -# v1.10.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2021-11-12) - -* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. -* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature. - -# v1.9.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Feature**: Updated service to latest API model. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-10-21) - -* **Feature**: API client updated -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.1 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-09-24) - -* **Feature**: API client updated - -# v1.6.0 (2021-09-17) - -* **Feature**: Updated API client and endpoints to latest revision. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go deleted file mode 100644 index 9d8a1588aa..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go +++ /dev/null @@ -1,538 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - smithy "github.com/aws/smithy-go" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "time" -) - -const ServiceID = "ECR" -const ServiceAPIVersion = "2015-09-21" - -// Client provides the API client to make operations call for Amazon EC2 Container -// Registry. -type Client struct { - options Options -} - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveEndpointResolverV2(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - wrapWithAnonymousAuth(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. Config overrides should instead be made on a per-operation basis through -// functional options. 
-func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { - ctx = middleware.ClearStackValues(ctx) - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) - if err != nil { - err = &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - return result, metadata, err -} - -type operationInputKey struct{} - -func setOperationInput(ctx context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" -} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in 
middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - -func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, optFns...) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) - - default: - o.Retryer = retry.NewStandard(standardOptions...) 
- } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ecr", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - 
-func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) - -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) - -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go deleted file mode 100644 index a3c278bc98..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go +++ /dev/null @@ -1,155 +0,0 @@ -// 
Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Checks the availability of one or more image layers in a repository. When an -// image is pushed to a repository, each image layer is checked to verify if it has -// been uploaded before. If it has been uploaded, then the image layer is skipped. -// This operation is used by the Amazon ECR proxy and is not generally used by -// customers for pulling and pushing images. In most cases, you should use the -// docker CLI to pull, tag, and push images. -func (c *Client) BatchCheckLayerAvailability(ctx context.Context, params *BatchCheckLayerAvailabilityInput, optFns ...func(*Options)) (*BatchCheckLayerAvailabilityOutput, error) { - if params == nil { - params = &BatchCheckLayerAvailabilityInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "BatchCheckLayerAvailability", params, optFns, c.addOperationBatchCheckLayerAvailabilityMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*BatchCheckLayerAvailabilityOutput) - out.ResultMetadata = metadata - return out, nil -} - -type BatchCheckLayerAvailabilityInput struct { - - // The digests of the image layers to check. - // - // This member is required. - LayerDigests []string - - // The name of the repository that is associated with the image layers to check. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry that contains - // the image layers to check. If you do not specify a registry, the default - // registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type BatchCheckLayerAvailabilityOutput struct { - - // Any failures associated with the call. - Failures []types.LayerFailure - - // A list of image layer objects corresponding to the image layer references in - // the request. - Layers []types.Layer - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationBatchCheckLayerAvailabilityMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchCheckLayerAvailability{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchCheckLayerAvailability{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "BatchCheckLayerAvailability"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpBatchCheckLayerAvailabilityValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchCheckLayerAvailability(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opBatchCheckLayerAvailability(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "BatchCheckLayerAvailability", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go deleted file mode 100644 index d9846d2585..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes a list of specified images within a repository. 
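(For orientation, a minimal sketch of what a caller of this BatchDeleteImage operation looks like. The repository and tag names are hypothetical, and the canonical github.com import paths are assumed rather than the mirrored github.com paths shown in this vendor tree.)

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg)

	// Deleting by tag removes that tag; the image itself is deleted once
	// its last tag is removed. "my-repo" and "v1.0.0" are placeholders.
	out, err := client.BatchDeleteImage(context.TODO(), &ecr.BatchDeleteImageInput{
		RepositoryName: aws.String("my-repo"),
		ImageIds:       []types.ImageIdentifier{{ImageTag: aws.String("v1.0.0")}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range out.Failures {
		fmt.Println("failure:", f.FailureCode, aws.ToString(f.FailureReason))
	}
}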
Images are specified -// with either an imageTag or imageDigest . You can remove a tag from an image by -// specifying the image's tag in your request. When you remove the last tag from an -// image, the image is deleted from your repository. You can completely delete an -// image (and all of its tags) by specifying the image's digest in your request. -func (c *Client) BatchDeleteImage(ctx context.Context, params *BatchDeleteImageInput, optFns ...func(*Options)) (*BatchDeleteImageOutput, error) { - if params == nil { - params = &BatchDeleteImageInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "BatchDeleteImage", params, optFns, c.addOperationBatchDeleteImageMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*BatchDeleteImageOutput) - out.ResultMetadata = metadata - return out, nil -} - -// Deletes specified images within a specified repository. Images are specified -// with either the imageTag or imageDigest . -type BatchDeleteImageInput struct { - - // A list of image ID references that correspond to images to delete. The format - // of the imageIds reference is imageTag=tag or imageDigest=digest . - // - // This member is required. - ImageIds []types.ImageIdentifier - - // The repository that contains the image to delete. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry that contains - // the image to delete. If you do not specify a registry, the default registry is - // assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type BatchDeleteImageOutput struct { - - // Any failures associated with the call. - Failures []types.ImageFailure - - // The image IDs of the deleted images. - ImageIds []types.ImageIdentifier - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationBatchDeleteImageMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchDeleteImage{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchDeleteImage{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "BatchDeleteImage"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpBatchDeleteImageValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchDeleteImage(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opBatchDeleteImage(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "BatchDeleteImage", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go deleted file mode 100644 index 756873b39f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Gets detailed information for an image. Images are specified with either an -// imageTag or imageDigest . When an image is pulled, the BatchGetImage API is -// called once to retrieve the image manifest. 
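(For reference, a minimal sketch of retrieving a manifest through this BatchGetImage operation, written as a standalone helper; the repository and tag values are hypothetical, and the canonical github.com import paths are assumed.)

package ecrutil

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// fetchManifest returns the raw manifest of repo:tag, accepting both the
// Docker v2 and OCI manifest media types.
func fetchManifest(ctx context.Context, client *ecr.Client, repo, tag string) (string, error) {
	out, err := client.BatchGetImage(ctx, &ecr.BatchGetImageInput{
		RepositoryName: aws.String(repo),
		ImageIds:       []types.ImageIdentifier{{ImageTag: aws.String(tag)}},
		AcceptedMediaTypes: []string{
			"application/vnd.docker.distribution.manifest.v2+json",
			"application/vnd.oci.image.manifest.v1+json",
		},
	})
	if err != nil {
		return "", err
	}
	if len(out.Images) == 0 {
		return "", fmt.Errorf("no image found for %s:%s", repo, tag)
	}
	return aws.ToString(out.Images[0].ImageManifest), nil
}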
-func (c *Client) BatchGetImage(ctx context.Context, params *BatchGetImageInput, optFns ...func(*Options)) (*BatchGetImageOutput, error) { - if params == nil { - params = &BatchGetImageInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "BatchGetImage", params, optFns, c.addOperationBatchGetImageMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*BatchGetImageOutput) - out.ResultMetadata = metadata - return out, nil -} - -type BatchGetImageInput struct { - - // A list of image ID references that correspond to images to describe. The format - // of the imageIds reference is imageTag=tag or imageDigest=digest . - // - // This member is required. - ImageIds []types.ImageIdentifier - - // The repository that contains the images to describe. - // - // This member is required. - RepositoryName *string - - // The accepted media types for the request. Valid values: - // application/vnd.docker.distribution.manifest.v1+json | - // application/vnd.docker.distribution.manifest.v2+json | - // application/vnd.oci.image.manifest.v1+json - AcceptedMediaTypes []string - - // The Amazon Web Services account ID associated with the registry that contains - // the images to describe. If you do not specify a registry, the default registry - // is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type BatchGetImageOutput struct { - - // Any failures associated with the call. - Failures []types.ImageFailure - - // A list of image objects corresponding to the image references in the request. - Images []types.Image - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationBatchGetImageMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchGetImage{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchGetImage{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "BatchGetImage"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpBatchGetImageValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetImage(options.Region), middleware.Before); 
err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opBatchGetImage(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "BatchGetImage", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go deleted file mode 100644 index 4d2a19bb23..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go +++ /dev/null @@ -1,139 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Gets the scanning configuration for one or more repositories. -func (c *Client) BatchGetRepositoryScanningConfiguration(ctx context.Context, params *BatchGetRepositoryScanningConfigurationInput, optFns ...func(*Options)) (*BatchGetRepositoryScanningConfigurationOutput, error) { - if params == nil { - params = &BatchGetRepositoryScanningConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "BatchGetRepositoryScanningConfiguration", params, optFns, c.addOperationBatchGetRepositoryScanningConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*BatchGetRepositoryScanningConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type BatchGetRepositoryScanningConfigurationInput struct { - - // One or more repository names to get the scanning configuration for. - // - // This member is required. - RepositoryNames []string - - noSmithyDocumentSerde -} - -type BatchGetRepositoryScanningConfigurationOutput struct { - - // Any failures associated with the call. - Failures []types.RepositoryScanningConfigurationFailure - - // The scanning configuration for the requested repositories. - ScanningConfigurations []types.RepositoryScanningConfiguration - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationBatchGetRepositoryScanningConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchGetRepositoryScanningConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchGetRepositoryScanningConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "BatchGetRepositoryScanningConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpBatchGetRepositoryScanningConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetRepositoryScanningConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opBatchGetRepositoryScanningConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "BatchGetRepositoryScanningConfiguration", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go deleted file mode 100644 index a145f4d5a6..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go +++ /dev/null @@ -1,166 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Informs Amazon ECR that the image layer upload has completed for a specified -// registry, repository name, and upload ID. You can optionally provide a sha256 -// digest of the image layer for data validation purposes. When an image is pushed, -// the CompleteLayerUpload API is called once per each new image layer to verify -// that the upload has completed. This operation is used by the Amazon ECR proxy -// and is not generally used by customers for pulling and pushing images. In most -// cases, you should use the docker CLI to pull, tag, and push images. -func (c *Client) CompleteLayerUpload(ctx context.Context, params *CompleteLayerUploadInput, optFns ...func(*Options)) (*CompleteLayerUploadOutput, error) { - if params == nil { - params = &CompleteLayerUploadInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CompleteLayerUpload", params, optFns, c.addOperationCompleteLayerUploadMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CompleteLayerUploadOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CompleteLayerUploadInput struct { - - // The sha256 digest of the image layer. - // - // This member is required. - LayerDigests []string - - // The name of the repository to associate with the image layer. - // - // This member is required. - RepositoryName *string - - // The upload ID from a previous InitiateLayerUpload operation to associate with - // the image layer. - // - // This member is required. - UploadId *string - - // The Amazon Web Services account ID associated with the registry to which to - // upload layers. If you do not specify a registry, the default registry is - // assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type CompleteLayerUploadOutput struct { - - // The sha256 digest of the image layer. - LayerDigest *string - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // The upload ID associated with the layer. - UploadId *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCompleteLayerUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpCompleteLayerUpload{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCompleteLayerUpload{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CompleteLayerUpload"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpCompleteLayerUploadValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteLayerUpload(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opCompleteLayerUpload(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CompleteLayerUpload", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go deleted file mode 100644 index 8e0f2f8de6..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go +++ /dev/null @@ -1,183 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Creates a pull through cache rule. 
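(A minimal sketch of creating such a rule for Amazon ECR Public; the local prefix is an arbitrary placeholder, and the upstream URL must be one of the supported registries listed below. Canonical github.com import paths assumed.)

package ecrutil

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

// createPublicPassthrough caches public.ecr.aws images under the
// "ecr-public/" repository prefix of the private registry.
func createPublicPassthrough(ctx context.Context, client *ecr.Client) error {
	_, err := client.CreatePullThroughCacheRule(ctx, &ecr.CreatePullThroughCacheRuleInput{
		EcrRepositoryPrefix: aws.String("ecr-public"),     // hypothetical local prefix
		UpstreamRegistryUrl: aws.String("public.ecr.aws"), // Amazon ECR Public
	})
	return err
}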
A pull through cache rule provides a way to -// cache images from an upstream registry source in your Amazon ECR private -// registry. For more information, see Using pull through cache rules (https://docs.aws.amazon.com/AmazonECR/latest/userguide/pull-through-cache.html) -// in the Amazon Elastic Container Registry User Guide. -func (c *Client) CreatePullThroughCacheRule(ctx context.Context, params *CreatePullThroughCacheRuleInput, optFns ...func(*Options)) (*CreatePullThroughCacheRuleOutput, error) { - if params == nil { - params = &CreatePullThroughCacheRuleInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreatePullThroughCacheRule", params, optFns, c.addOperationCreatePullThroughCacheRuleMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreatePullThroughCacheRuleOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreatePullThroughCacheRuleInput struct { - - // The repository name prefix to use when caching images from the source registry. - // - // This member is required. - EcrRepositoryPrefix *string - - // The registry URL of the upstream public registry to use as the source for the - // pull through cache rule. The following is the syntax to use for each supported - // upstream registry. - // - Amazon ECR Public ( ecr-public ) - public.ecr.aws - // - Docker Hub ( docker-hub ) - registry-1.docker.io - // - Quay ( quay ) - quay.io - // - Kubernetes ( k8s ) - registry.k8s.io - // - GitHub Container Registry ( github-container-registry ) - ghcr.io - // - Microsoft Azure Container Registry ( azure-container-registry ) - - // .azurecr.io - // - // This member is required. - UpstreamRegistryUrl *string - - // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager - // secret that identifies the credentials to authenticate to the upstream registry. - CredentialArn *string - - // The Amazon Web Services account ID associated with the registry to create the - // pull through cache rule for. If you do not specify a registry, the default - // registry is assumed. - RegistryId *string - - // The name of the upstream registry. - UpstreamRegistry types.UpstreamRegistry - - noSmithyDocumentSerde -} - -type CreatePullThroughCacheRuleOutput struct { - - // The date and time, in JavaScript date format, when the pull through cache rule - // was created. - CreatedAt *time.Time - - // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager - // secret associated with the pull through cache rule. - CredentialArn *string - - // The Amazon ECR repository prefix associated with the pull through cache rule. - EcrRepositoryPrefix *string - - // The registry ID associated with the request. - RegistryId *string - - // The name of the upstream registry associated with the pull through cache rule. - UpstreamRegistry types.UpstreamRegistry - - // The upstream registry URL associated with the pull through cache rule. - UpstreamRegistryUrl *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreatePullThroughCacheRuleMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreatePullThroughCacheRule{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreatePullThroughCacheRule{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreatePullThroughCacheRule"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpCreatePullThroughCacheRuleValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreatePullThroughCacheRule(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opCreatePullThroughCacheRule(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreatePullThroughCacheRule", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go deleted file mode 100644 index 87088e1dd8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go +++ /dev/null @@ -1,166 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates a repository. 
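(A minimal sketch of creating an immutable-tag repository with scan-on-push enabled; the repository name and resource tag are hypothetical, and the canonical github.com import paths are assumed.)

package ecrutil

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// createRepo provisions a namespaced repository with immutable tags and
// scan-on-push, returning the repository URI to push to.
func createRepo(ctx context.Context, client *ecr.Client) (string, error) {
	out, err := client.CreateRepository(ctx, &ecr.CreateRepositoryInput{
		RepositoryName:     aws.String("project-a/nginx-web-app"),
		ImageTagMutability: types.ImageTagMutabilityImmutable,
		ImageScanningConfiguration: &types.ImageScanningConfiguration{
			ScanOnPush: true,
		},
		Tags: []types.Tag{{Key: aws.String("team"), Value: aws.String("platform")}},
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.Repository.RepositoryUri), nil
}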
For more information, see Amazon ECR repositories (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) -// in the Amazon Elastic Container Registry User Guide. -func (c *Client) CreateRepository(ctx context.Context, params *CreateRepositoryInput, optFns ...func(*Options)) (*CreateRepositoryOutput, error) { - if params == nil { - params = &CreateRepositoryInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateRepository", params, optFns, c.addOperationCreateRepositoryMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateRepositoryOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateRepositoryInput struct { - - // The name to use for the repository. The repository name may be specified on its - // own (such as nginx-web-app ) or it can be prepended with a namespace to group - // the repository into a category (such as project-a/nginx-web-app ). The - // repository name must start with a letter and can only contain lowercase letters, - // numbers, hyphens, underscores, and forward slashes. - // - // This member is required. - RepositoryName *string - - // The encryption configuration for the repository. This determines how the - // contents of your repository are encrypted at rest. - EncryptionConfiguration *types.EncryptionConfiguration - - // The image scanning configuration for the repository. This determines whether - // images are scanned for known vulnerabilities after being pushed to the - // repository. - ImageScanningConfiguration *types.ImageScanningConfiguration - - // The tag mutability setting for the repository. If this parameter is omitted, - // the default setting of MUTABLE will be used which will allow image tags to be - // overwritten. If IMMUTABLE is specified, all image tags within the repository - // will be immutable which will prevent them from being overwritten. - ImageTagMutability types.ImageTagMutability - - // The Amazon Web Services account ID associated with the registry to create the - // repository. If you do not specify a registry, the default registry is assumed. - RegistryId *string - - // The metadata that you apply to the repository to help you categorize and - // organize them. Each tag consists of a key and an optional value, both of which - // you define. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. - Tags []types.Tag - - noSmithyDocumentSerde -} - -type CreateRepositoryOutput struct { - - // The repository that was created. - Repository *types.Repository - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateRepositoryMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateRepository{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateRepository{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateRepository"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpCreateRepositoryValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateRepository(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opCreateRepository(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateRepository", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go deleted file mode 100644 index 0b5887326b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go +++ /dev/null @@ -1,150 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Deletes the lifecycle policy associated with the specified repository. 
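(A minimal sketch of a caller of this operation; the repository name is hypothetical, and note that the response echoes back the policy text that was deleted. Canonical github.com import paths assumed.)

package ecrutil

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

// dropLifecyclePolicy removes the repository's lifecycle policy and
// reports what was deleted and when it last ran.
func dropLifecyclePolicy(ctx context.Context, client *ecr.Client) error {
	out, err := client.DeleteLifecyclePolicy(ctx, &ecr.DeleteLifecyclePolicyInput{
		RepositoryName: aws.String("my-repo"),
	})
	if err != nil {
		return err
	}
	fmt.Printf("deleted policy %q (last evaluated %v)\n",
		aws.ToString(out.LifecyclePolicyText), out.LastEvaluatedAt)
	return nil
}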
-func (c *Client) DeleteLifecyclePolicy(ctx context.Context, params *DeleteLifecyclePolicyInput, optFns ...func(*Options)) (*DeleteLifecyclePolicyOutput, error) { - if params == nil { - params = &DeleteLifecyclePolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteLifecyclePolicy", params, optFns, c.addOperationDeleteLifecyclePolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteLifecyclePolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteLifecyclePolicyInput struct { - - // The name of the repository. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository. If you do not specify a registry, the default registry is - // assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type DeleteLifecyclePolicyOutput struct { - - // The time stamp of the last time that the lifecycle policy was run. - LastEvaluatedAt *time.Time - - // The JSON lifecycle policy text. - LifecyclePolicyText *string - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteLifecyclePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteLifecyclePolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteLifecyclePolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteLifecyclePolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpDeleteLifecyclePolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteLifecyclePolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if 
err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opDeleteLifecyclePolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteLifecyclePolicy", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go deleted file mode 100644 index 3340af4fdc..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go +++ /dev/null @@ -1,155 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Deletes a pull through cache rule. -func (c *Client) DeletePullThroughCacheRule(ctx context.Context, params *DeletePullThroughCacheRuleInput, optFns ...func(*Options)) (*DeletePullThroughCacheRuleOutput, error) { - if params == nil { - params = &DeletePullThroughCacheRuleInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeletePullThroughCacheRule", params, optFns, c.addOperationDeletePullThroughCacheRuleMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeletePullThroughCacheRuleOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeletePullThroughCacheRuleInput struct { - - // The Amazon ECR repository prefix associated with the pull through cache rule to - // delete. - // - // This member is required. - EcrRepositoryPrefix *string - - // The Amazon Web Services account ID associated with the registry that contains - // the pull through cache rule. If you do not specify a registry, the default - // registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type DeletePullThroughCacheRuleOutput struct { - - // The timestamp associated with the pull through cache rule. - CreatedAt *time.Time - - // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager - // secret associated with the pull through cache rule. - CredentialArn *string - - // The Amazon ECR repository prefix associated with the request. - EcrRepositoryPrefix *string - - // The registry ID associated with the request. - RegistryId *string - - // The upstream registry URL associated with the pull through cache rule. - UpstreamRegistryUrl *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeletePullThroughCacheRuleMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeletePullThroughCacheRule{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeletePullThroughCacheRule{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeletePullThroughCacheRule"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpDeletePullThroughCacheRuleValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeletePullThroughCacheRule(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opDeletePullThroughCacheRule(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeletePullThroughCacheRule", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go deleted file mode 100644 index 723b0f7fe8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go +++ /dev/null @@ -1,129 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes the registry permissions policy. 
-func (c *Client) DeleteRegistryPolicy(ctx context.Context, params *DeleteRegistryPolicyInput, optFns ...func(*Options)) (*DeleteRegistryPolicyOutput, error) { - if params == nil { - params = &DeleteRegistryPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteRegistryPolicy", params, optFns, c.addOperationDeleteRegistryPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteRegistryPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteRegistryPolicyInput struct { - noSmithyDocumentSerde -} - -type DeleteRegistryPolicyOutput struct { - - // The contents of the registry permissions policy that was deleted. - PolicyText *string - - // The registry ID associated with the request. - RegistryId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteRegistryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteRegistryPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteRegistryPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteRegistryPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRegistryPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opDeleteRegistryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteRegistryPolicy", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go deleted file mode 100644 index aba9086337..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go +++ /dev/null @@ -1,147 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes a repository. If the repository isn't empty, you must either delete the -// contents of the repository or use the force option to delete the repository and -// have Amazon ECR delete all of its contents on your behalf. -func (c *Client) DeleteRepository(ctx context.Context, params *DeleteRepositoryInput, optFns ...func(*Options)) (*DeleteRepositoryOutput, error) { - if params == nil { - params = &DeleteRepositoryInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteRepository", params, optFns, c.addOperationDeleteRepositoryMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteRepositoryOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteRepositoryInput struct { - - // The name of the repository to delete. - // - // This member is required. - RepositoryName *string - - // If true, deleting the repository force deletes the contents of the repository. - // If false, the repository must be empty before attempting to delete it. - Force bool - - // The Amazon Web Services account ID associated with the registry that contains - // the repository to delete. If you do not specify a registry, the default registry - // is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type DeleteRepositoryOutput struct { - - // The repository that was deleted. - Repository *types.Repository - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteRepositoryMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteRepository{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteRepository{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteRepository"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpDeleteRepositoryValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRepository(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opDeleteRepository(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteRepository", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go deleted file mode 100644 index 9ebd890616..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go +++ /dev/null @@ -1,147 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes the repository policy associated with the specified repository. 
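// Editorial sketch (not part of the upstream generated file): given an
// *ecr.Client c, the operation defined below is typically driven as
//
//	out, err := c.DeleteRepositoryPolicy(ctx, &ecr.DeleteRepositoryPolicyInput{
//		RepositoryName: aws.String("my-repo"), // hypothetical repository name
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(aws.ToString(out.PolicyText)) // the JSON policy that was removed
//
// with aws.String/aws.ToString coming from github.com/aws/aws-sdk-go-v2/aws.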
-func (c *Client) DeleteRepositoryPolicy(ctx context.Context, params *DeleteRepositoryPolicyInput, optFns ...func(*Options)) (*DeleteRepositoryPolicyOutput, error) { - if params == nil { - params = &DeleteRepositoryPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteRepositoryPolicy", params, optFns, c.addOperationDeleteRepositoryPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteRepositoryPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteRepositoryPolicyInput struct { - - // The name of the repository that is associated with the repository policy to - // delete. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository policy to delete. If you do not specify a registry, the default - // registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type DeleteRepositoryPolicyOutput struct { - - // The JSON repository policy that was deleted from the repository. - PolicyText *string - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteRepositoryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteRepositoryPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteRepositoryPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteRepositoryPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpDeleteRepositoryPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRepositoryPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return 
err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opDeleteRepositoryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteRepositoryPolicy", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go deleted file mode 100644 index 58b518cf2f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go +++ /dev/null @@ -1,151 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the replication status for a specified image. -func (c *Client) DescribeImageReplicationStatus(ctx context.Context, params *DescribeImageReplicationStatusInput, optFns ...func(*Options)) (*DescribeImageReplicationStatusOutput, error) { - if params == nil { - params = &DescribeImageReplicationStatusInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DescribeImageReplicationStatus", params, optFns, c.addOperationDescribeImageReplicationStatusMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DescribeImageReplicationStatusOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DescribeImageReplicationStatusInput struct { - - // An object with identifying information for an image in an Amazon ECR repository. - // - // This member is required. - ImageId *types.ImageIdentifier - - // The name of the repository that the image is in. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry. If you do not - // specify a registry, the default registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type DescribeImageReplicationStatusOutput struct { - - // An object with identifying information for an image in an Amazon ECR repository. - ImageId *types.ImageIdentifier - - // The replication status details for the images in the specified repository. - ReplicationStatuses []types.ImageReplicationStatus - - // The repository name associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDescribeImageReplicationStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeImageReplicationStatus{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeImageReplicationStatus{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeImageReplicationStatus"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpDescribeImageReplicationStatusValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageReplicationStatus(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opDescribeImageReplicationStatus(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DescribeImageReplicationStatus", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go deleted file mode 100644 index 1947caec39..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go +++ /dev/null @@ -1,473 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithytime "github.com/aws/smithy-go/time" - smithyhttp "github.com/aws/smithy-go/transport/http" - smithywaiter "github.com/aws/smithy-go/waiter" - "github.com/jmespath/go-jmespath" - "time" -) - -// Returns the scan findings for the specified image. -func (c *Client) DescribeImageScanFindings(ctx context.Context, params *DescribeImageScanFindingsInput, optFns ...func(*Options)) (*DescribeImageScanFindingsOutput, error) { - if params == nil { - params = &DescribeImageScanFindingsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DescribeImageScanFindings", params, optFns, c.addOperationDescribeImageScanFindingsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DescribeImageScanFindingsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DescribeImageScanFindingsInput struct { - - // An object with identifying information for an image in an Amazon ECR repository. - // - // This member is required. - ImageId *types.ImageIdentifier - - // The repository for the image for which to describe the scan findings. - // - // This member is required. - RepositoryName *string - - // The maximum number of image scan results returned by DescribeImageScanFindings - // in paginated output. When this parameter is used, DescribeImageScanFindings - // only returns maxResults results in a single page along with a nextToken - // response element. The remaining results of the initial request can be seen by - // sending another DescribeImageScanFindings request with the returned nextToken - // value. This value can be between 1 and 1000. If this parameter is not used, then - // DescribeImageScanFindings returns up to 100 results and a nextToken value, if - // applicable. - MaxResults *int32 - - // The nextToken value returned from a previous paginated DescribeImageScanFindings - // request where maxResults was used and the results exceeded the value of that - // parameter. Pagination continues from the end of the previous results that - // returned the nextToken value. This value is null when there are no more results - // to return. - NextToken *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository in which to describe the image scan findings for. If you do not - // specify a registry, the default registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type DescribeImageScanFindingsOutput struct { - - // An object with identifying information for an image in an Amazon ECR repository. - ImageId *types.ImageIdentifier - - // The information contained in the image scan findings. - ImageScanFindings *types.ImageScanFindings - - // The current state of the scan. - ImageScanStatus *types.ImageScanStatus - - // The nextToken value to include in a future DescribeImageScanFindings request. - // When the results of a DescribeImageScanFindings request exceed maxResults , this - // value can be used to retrieve the next page of results. This value is null when - // there are no more results to return. - NextToken *string - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDescribeImageScanFindingsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeImageScanFindings{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeImageScanFindings{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeImageScanFindings"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpDescribeImageScanFindingsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageScanFindings(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -// DescribeImageScanFindingsAPIClient is a client that implements the -// DescribeImageScanFindings operation. -type DescribeImageScanFindingsAPIClient interface { - DescribeImageScanFindings(context.Context, *DescribeImageScanFindingsInput, ...func(*Options)) (*DescribeImageScanFindingsOutput, error) -} - -var _ DescribeImageScanFindingsAPIClient = (*Client)(nil) - -// DescribeImageScanFindingsPaginatorOptions is the paginator options for -// DescribeImageScanFindings -type DescribeImageScanFindingsPaginatorOptions struct { - // The maximum number of image scan results returned by DescribeImageScanFindings - // in paginated output. When this parameter is used, DescribeImageScanFindings - // only returns maxResults results in a single page along with a nextToken - // response element. The remaining results of the initial request can be seen by - // sending another DescribeImageScanFindings request with the returned nextToken - // value. This value can be between 1 and 1000. 
If this parameter is not used, then - // DescribeImageScanFindings returns up to 100 results and a nextToken value, if - // applicable. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// DescribeImageScanFindingsPaginator is a paginator for DescribeImageScanFindings -type DescribeImageScanFindingsPaginator struct { - options DescribeImageScanFindingsPaginatorOptions - client DescribeImageScanFindingsAPIClient - params *DescribeImageScanFindingsInput - nextToken *string - firstPage bool -} - -// NewDescribeImageScanFindingsPaginator returns a new -// DescribeImageScanFindingsPaginator -func NewDescribeImageScanFindingsPaginator(client DescribeImageScanFindingsAPIClient, params *DescribeImageScanFindingsInput, optFns ...func(*DescribeImageScanFindingsPaginatorOptions)) *DescribeImageScanFindingsPaginator { - if params == nil { - params = &DescribeImageScanFindingsInput{} - } - - options := DescribeImageScanFindingsPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &DescribeImageScanFindingsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *DescribeImageScanFindingsPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next DescribeImageScanFindings page. -func (p *DescribeImageScanFindingsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeImageScanFindingsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - result, err := p.client.DescribeImageScanFindings(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -// ImageScanCompleteWaiterOptions are waiter options for ImageScanCompleteWaiter -type ImageScanCompleteWaiterOptions struct { - - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - // - // Passing options here is functionally equivalent to passing values to this - // config's ClientOptions field that extend the inner client's APIOptions directly. - APIOptions []func(*middleware.Stack) error - - // Functional options to be passed to all operations invoked by this client. - // - // Function values that modify the inner APIOptions are applied after the waiter - // config's own APIOptions modifiers. - ClientOptions []func(*Options) - - // MinDelay is the minimum amount of time to delay between retries. If unset, - // ImageScanCompleteWaiter will use default minimum delay of 5 seconds. Note that - // MinDelay must resolve to a value lesser than or equal to the MaxDelay. 
- MinDelay time.Duration - - // MaxDelay is the maximum amount of time to delay between retries. If unset or - // set to zero, ImageScanCompleteWaiter will use default max delay of 120 seconds. - // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. - MaxDelay time.Duration - - // LogWaitAttempts is used to enable logging for waiter retry attempts - LogWaitAttempts bool - - // Retryable is function that can be used to override the service defined - // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. - Retryable func(context.Context, *DescribeImageScanFindingsInput, *DescribeImageScanFindingsOutput, error) (bool, error) -} - -// ImageScanCompleteWaiter defines the waiters for ImageScanComplete -type ImageScanCompleteWaiter struct { - client DescribeImageScanFindingsAPIClient - - options ImageScanCompleteWaiterOptions -} - -// NewImageScanCompleteWaiter constructs a ImageScanCompleteWaiter. -func NewImageScanCompleteWaiter(client DescribeImageScanFindingsAPIClient, optFns ...func(*ImageScanCompleteWaiterOptions)) *ImageScanCompleteWaiter { - options := ImageScanCompleteWaiterOptions{} - options.MinDelay = 5 * time.Second - options.MaxDelay = 120 * time.Second - options.Retryable = imageScanCompleteStateRetryable - - for _, fn := range optFns { - fn(&options) - } - return &ImageScanCompleteWaiter{ - client: client, - options: options, - } -} - -// Wait calls the waiter function for ImageScanComplete waiter. The maxWaitDur is -// the maximum wait duration the waiter will wait. The maxWaitDur is required and -// must be greater than zero. -func (w *ImageScanCompleteWaiter) Wait(ctx context.Context, params *DescribeImageScanFindingsInput, maxWaitDur time.Duration, optFns ...func(*ImageScanCompleteWaiterOptions)) error { - _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) - return err -} - -// WaitForOutput calls the waiter function for ImageScanComplete waiter and -// returns the output of the successful operation. The maxWaitDur is the maximum -// wait duration the waiter will wait. The maxWaitDur is required and must be -// greater than zero. 
-func (w *ImageScanCompleteWaiter) WaitForOutput(ctx context.Context, params *DescribeImageScanFindingsInput, maxWaitDur time.Duration, optFns ...func(*ImageScanCompleteWaiterOptions)) (*DescribeImageScanFindingsOutput, error) { - if maxWaitDur <= 0 { - return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") - } - - options := w.options - for _, fn := range optFns { - fn(&options) - } - - if options.MaxDelay <= 0 { - options.MaxDelay = 120 * time.Second - } - - if options.MinDelay > options.MaxDelay { - return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) - } - - ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) - defer cancelFn() - - logger := smithywaiter.Logger{} - remainingTime := maxWaitDur - - var attempt int64 - for { - - attempt++ - apiOptions := options.APIOptions - start := time.Now() - - if options.LogWaitAttempts { - logger.Attempt = attempt - apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) - apiOptions = append(apiOptions, logger.AddLogger) - } - - out, err := w.client.DescribeImageScanFindings(ctx, params, func(o *Options) { - o.APIOptions = append(o.APIOptions, apiOptions...) - for _, opt := range options.ClientOptions { - opt(o) - } - }) - - retryable, err := options.Retryable(ctx, params, out, err) - if err != nil { - return nil, err - } - if !retryable { - return out, nil - } - - remainingTime -= time.Since(start) - if remainingTime < options.MinDelay || remainingTime <= 0 { - break - } - - // compute exponential backoff between waiter retries - delay, err := smithywaiter.ComputeDelay( - attempt, options.MinDelay, options.MaxDelay, remainingTime, - ) - if err != nil { - return nil, fmt.Errorf("error computing waiter delay, %w", err) - } - - remainingTime -= delay - // sleep for the delay amount before invoking a request - if err := smithytime.SleepWithContext(ctx, delay); err != nil { - return nil, fmt.Errorf("request cancelled while waiting, %w", err) - } - } - return nil, fmt.Errorf("exceeded max wait time for ImageScanComplete waiter") -} - -func imageScanCompleteStateRetryable(ctx context.Context, input *DescribeImageScanFindingsInput, output *DescribeImageScanFindingsOutput, err error) (bool, error) { - - if err == nil { - pathValue, err := jmespath.Search("imageScanStatus.status", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) - } - - expectedValue := "COMPLETE" - value, ok := pathValue.(types.ScanStatus) - if !ok { - return false, fmt.Errorf("waiter comparator expected types.ScanStatus value, got %T", pathValue) - } - - if string(value) == expectedValue { - return false, nil - } - } - - if err == nil { - pathValue, err := jmespath.Search("imageScanStatus.status", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) - } - - expectedValue := "FAILED" - value, ok := pathValue.(types.ScanStatus) - if !ok { - return false, fmt.Errorf("waiter comparator expected types.ScanStatus value, got %T", pathValue) - } - - if string(value) == expectedValue { - return false, fmt.Errorf("waiter state transitioned to Failure") - } - } - - return true, nil -} - -func newServiceMetadataMiddleware_opDescribeImageScanFindings(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DescribeImageScanFindings", - } -} diff --git 
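Note: the file removed above shipped three coupled pieces: the DescribeImageScanFindings operation, a nextToken-driven paginator, and the ImageScanComplete waiter. The following is a minimal, illustrative sketch of how a direct consumer of the AWS SDK for Go v2 wires them together; the repository name "my-repo" and tag "latest" are hypothetical, the import paths are the SDK's canonical github.com module names, and none of this is ko's own code.

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/ecr"
    "github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := ecr.NewFromConfig(cfg)

    params := &ecr.DescribeImageScanFindingsInput{
        RepositoryName: aws.String("my-repo"),                                  // hypothetical repository
        ImageId:        &types.ImageIdentifier{ImageTag: aws.String("latest")}, // hypothetical tag
    }

    // Poll DescribeImageScanFindings until imageScanStatus.status reaches
    // COMPLETE, backing off between the waiter's MinDelay (5s) and MaxDelay (120s).
    waiter := ecr.NewImageScanCompleteWaiter(client)
    if err := waiter.Wait(ctx, params, 5*time.Minute); err != nil {
        log.Fatal(err)
    }

    // Page through the findings; HasMorePages stays true until the service
    // returns an empty nextToken.
    p := ecr.NewDescribeImageScanFindingsPaginator(client, params)
    for p.HasMorePages() {
        page, err := p.NextPage(ctx)
        if err != nil {
            log.Fatal(err)
        }
        if page.ImageScanFindings != nil {
            fmt.Printf("page with %d findings\n", len(page.ImageScanFindings.Findings))
        }
    }
}

The waiter's retry predicate (imageScanCompleteStateRetryable above) returns success on COMPLETE and a terminal error on FAILED, so Wait only returns nil once the scan has actually finished.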
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go deleted file mode 100644 index 70894b9e57..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go +++ /dev/null @@ -1,271 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns metadata about the images in a repository. Beginning with Docker -// version 1.9, the Docker client compresses image layers before pushing them to a -// V2 Docker registry. The output of the docker images command shows the -// uncompressed image size, so it may return a larger image size than the image -// sizes returned by DescribeImages . -func (c *Client) DescribeImages(ctx context.Context, params *DescribeImagesInput, optFns ...func(*Options)) (*DescribeImagesOutput, error) { - if params == nil { - params = &DescribeImagesInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DescribeImages", params, optFns, c.addOperationDescribeImagesMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DescribeImagesOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DescribeImagesInput struct { - - // The repository that contains the images to describe. - // - // This member is required. - RepositoryName *string - - // The filter key and value with which to filter your DescribeImages results. - Filter *types.DescribeImagesFilter - - // The list of image IDs for the requested repository. - ImageIds []types.ImageIdentifier - - // The maximum number of repository results returned by DescribeImages in - // paginated output. When this parameter is used, DescribeImages only returns - // maxResults results in a single page along with a nextToken response element. - // The remaining results of the initial request can be seen by sending another - // DescribeImages request with the returned nextToken value. This value can be - // between 1 and 1000. If this parameter is not used, then DescribeImages returns - // up to 100 results and a nextToken value, if applicable. This option cannot be - // used when you specify images with imageIds . - MaxResults *int32 - - // The nextToken value returned from a previous paginated DescribeImages request - // where maxResults was used and the results exceeded the value of that parameter. - // Pagination continues from the end of the previous results that returned the - // nextToken value. This value is null when there are no more results to return. - // This option cannot be used when you specify images with imageIds . - NextToken *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository in which to describe images. If you do not specify a registry, - // the default registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type DescribeImagesOutput struct { - - // A list of ImageDetail objects that contain data about the image. - ImageDetails []types.ImageDetail - - // The nextToken value to include in a future DescribeImages request. When the - // results of a DescribeImages request exceed maxResults , this value can be used - // to retrieve the next page of results. 
This value is null when there are no more - // results to return. - NextToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeImages{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeImages{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeImages"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpDescribeImagesValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImages(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -// DescribeImagesAPIClient is a client that implements the DescribeImages -// operation. -type DescribeImagesAPIClient interface { - DescribeImages(context.Context, *DescribeImagesInput, ...func(*Options)) (*DescribeImagesOutput, error) -} - -var _ DescribeImagesAPIClient = (*Client)(nil) - -// DescribeImagesPaginatorOptions is the paginator options for DescribeImages -type DescribeImagesPaginatorOptions struct { - // The maximum number of repository results returned by DescribeImages in - // paginated output. When this parameter is used, DescribeImages only returns - // maxResults results in a single page along with a nextToken response element. - // The remaining results of the initial request can be seen by sending another - // DescribeImages request with the returned nextToken value. This value can be - // between 1 and 1000. If this parameter is not used, then DescribeImages returns - // up to 100 results and a nextToken value, if applicable. 
This option cannot be - // used when you specify images with imageIds . - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// DescribeImagesPaginator is a paginator for DescribeImages -type DescribeImagesPaginator struct { - options DescribeImagesPaginatorOptions - client DescribeImagesAPIClient - params *DescribeImagesInput - nextToken *string - firstPage bool -} - -// NewDescribeImagesPaginator returns a new DescribeImagesPaginator -func NewDescribeImagesPaginator(client DescribeImagesAPIClient, params *DescribeImagesInput, optFns ...func(*DescribeImagesPaginatorOptions)) *DescribeImagesPaginator { - if params == nil { - params = &DescribeImagesInput{} - } - - options := DescribeImagesPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &DescribeImagesPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *DescribeImagesPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next DescribeImages page. -func (p *DescribeImagesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeImagesOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - result, err := p.client.DescribeImages(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func newServiceMetadataMiddleware_opDescribeImages(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DescribeImages", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go deleted file mode 100644 index aff6fabeb0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go +++ /dev/null @@ -1,263 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the pull through cache rules for a registry. 
-func (c *Client) DescribePullThroughCacheRules(ctx context.Context, params *DescribePullThroughCacheRulesInput, optFns ...func(*Options)) (*DescribePullThroughCacheRulesOutput, error) { - if params == nil { - params = &DescribePullThroughCacheRulesInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DescribePullThroughCacheRules", params, optFns, c.addOperationDescribePullThroughCacheRulesMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DescribePullThroughCacheRulesOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DescribePullThroughCacheRulesInput struct { - - // The Amazon ECR repository prefixes associated with the pull through cache rules - // to return. If no repository prefix value is specified, all pull through cache - // rules are returned. - EcrRepositoryPrefixes []string - - // The maximum number of pull through cache rules returned by - // DescribePullThroughCacheRulesRequest in paginated output. When this parameter is - // used, DescribePullThroughCacheRulesRequest only returns maxResults results in a - // single page along with a nextToken response element. The remaining results of - // the initial request can be seen by sending another - // DescribePullThroughCacheRulesRequest request with the returned nextToken value. - // This value can be between 1 and 1000. If this parameter is not used, then - // DescribePullThroughCacheRulesRequest returns up to 100 results and a nextToken - // value, if applicable. - MaxResults *int32 - - // The nextToken value returned from a previous paginated - // DescribePullThroughCacheRulesRequest request where maxResults was used and the - // results exceeded the value of that parameter. Pagination continues from the end - // of the previous results that returned the nextToken value. This value is null - // when there are no more results to return. - NextToken *string - - // The Amazon Web Services account ID associated with the registry to return the - // pull through cache rules for. If you do not specify a registry, the default - // registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type DescribePullThroughCacheRulesOutput struct { - - // The nextToken value to include in a future DescribePullThroughCacheRulesRequest - // request. When the results of a DescribePullThroughCacheRulesRequest request - // exceed maxResults , this value can be used to retrieve the next page of results. - // This value is null when there are no more results to return. - NextToken *string - - // The details of the pull through cache rules. - PullThroughCacheRules []types.PullThroughCacheRule - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDescribePullThroughCacheRulesMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribePullThroughCacheRules{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribePullThroughCacheRules{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DescribePullThroughCacheRules"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribePullThroughCacheRules(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -// DescribePullThroughCacheRulesAPIClient is a client that implements the -// DescribePullThroughCacheRules operation. -type DescribePullThroughCacheRulesAPIClient interface { - DescribePullThroughCacheRules(context.Context, *DescribePullThroughCacheRulesInput, ...func(*Options)) (*DescribePullThroughCacheRulesOutput, error) -} - -var _ DescribePullThroughCacheRulesAPIClient = (*Client)(nil) - -// DescribePullThroughCacheRulesPaginatorOptions is the paginator options for -// DescribePullThroughCacheRules -type DescribePullThroughCacheRulesPaginatorOptions struct { - // The maximum number of pull through cache rules returned by - // DescribePullThroughCacheRulesRequest in paginated output. When this parameter is - // used, DescribePullThroughCacheRulesRequest only returns maxResults results in a - // single page along with a nextToken response element. The remaining results of - // the initial request can be seen by sending another - // DescribePullThroughCacheRulesRequest request with the returned nextToken value. - // This value can be between 1 and 1000. 
If this parameter is not used, then - // DescribePullThroughCacheRulesRequest returns up to 100 results and a nextToken - // value, if applicable. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// DescribePullThroughCacheRulesPaginator is a paginator for -// DescribePullThroughCacheRules -type DescribePullThroughCacheRulesPaginator struct { - options DescribePullThroughCacheRulesPaginatorOptions - client DescribePullThroughCacheRulesAPIClient - params *DescribePullThroughCacheRulesInput - nextToken *string - firstPage bool -} - -// NewDescribePullThroughCacheRulesPaginator returns a new -// DescribePullThroughCacheRulesPaginator -func NewDescribePullThroughCacheRulesPaginator(client DescribePullThroughCacheRulesAPIClient, params *DescribePullThroughCacheRulesInput, optFns ...func(*DescribePullThroughCacheRulesPaginatorOptions)) *DescribePullThroughCacheRulesPaginator { - if params == nil { - params = &DescribePullThroughCacheRulesInput{} - } - - options := DescribePullThroughCacheRulesPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &DescribePullThroughCacheRulesPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *DescribePullThroughCacheRulesPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next DescribePullThroughCacheRules page. -func (p *DescribePullThroughCacheRulesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribePullThroughCacheRulesOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - result, err := p.client.DescribePullThroughCacheRules(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func newServiceMetadataMiddleware_opDescribePullThroughCacheRules(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DescribePullThroughCacheRules", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go deleted file mode 100644 index c6eeffe540..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go +++ /dev/null @@ -1,132 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Describes the settings for a registry. 
The replication configuration for a -// repository can be created or updated with the PutReplicationConfiguration API -// action. -func (c *Client) DescribeRegistry(ctx context.Context, params *DescribeRegistryInput, optFns ...func(*Options)) (*DescribeRegistryOutput, error) { - if params == nil { - params = &DescribeRegistryInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DescribeRegistry", params, optFns, c.addOperationDescribeRegistryMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DescribeRegistryOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DescribeRegistryInput struct { - noSmithyDocumentSerde -} - -type DescribeRegistryOutput struct { - - // The ID of the registry. - RegistryId *string - - // The replication configuration for the registry. - ReplicationConfiguration *types.ReplicationConfiguration - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDescribeRegistryMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeRegistry{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeRegistry{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeRegistry"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRegistry(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opDescribeRegistry(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DescribeRegistry", - } -} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go deleted file mode 100644 index e2b89ff3c2..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go +++ /dev/null @@ -1,263 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Describes image repositories in a registry. -func (c *Client) DescribeRepositories(ctx context.Context, params *DescribeRepositoriesInput, optFns ...func(*Options)) (*DescribeRepositoriesOutput, error) { - if params == nil { - params = &DescribeRepositoriesInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DescribeRepositories", params, optFns, c.addOperationDescribeRepositoriesMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DescribeRepositoriesOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DescribeRepositoriesInput struct { - - // The maximum number of repository results returned by DescribeRepositories in - // paginated output. When this parameter is used, DescribeRepositories only - // returns maxResults results in a single page along with a nextToken response - // element. The remaining results of the initial request can be seen by sending - // another DescribeRepositories request with the returned nextToken value. This - // value can be between 1 and 1000. If this parameter is not used, then - // DescribeRepositories returns up to 100 results and a nextToken value, if - // applicable. This option cannot be used when you specify repositories with - // repositoryNames . - MaxResults *int32 - - // The nextToken value returned from a previous paginated DescribeRepositories - // request where maxResults was used and the results exceeded the value of that - // parameter. Pagination continues from the end of the previous results that - // returned the nextToken value. This value is null when there are no more results - // to return. This option cannot be used when you specify repositories with - // repositoryNames . This token should be treated as an opaque identifier that is - // only used to retrieve the next items in a list and not for other programmatic - // purposes. - NextToken *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repositories to be described. If you do not specify a registry, the default - // registry is assumed. - RegistryId *string - - // A list of repositories to describe. If this parameter is omitted, then all - // repositories in a registry are described. - RepositoryNames []string - - noSmithyDocumentSerde -} - -type DescribeRepositoriesOutput struct { - - // The nextToken value to include in a future DescribeRepositories request. When - // the results of a DescribeRepositories request exceed maxResults , this value can - // be used to retrieve the next page of results. This value is null when there are - // no more results to return. - NextToken *string - - // A list of repository objects corresponding to valid repositories. - Repositories []types.Repository - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDescribeRepositoriesMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeRepositories{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeRepositories{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeRepositories"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRepositories(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -// DescribeRepositoriesAPIClient is a client that implements the -// DescribeRepositories operation. -type DescribeRepositoriesAPIClient interface { - DescribeRepositories(context.Context, *DescribeRepositoriesInput, ...func(*Options)) (*DescribeRepositoriesOutput, error) -} - -var _ DescribeRepositoriesAPIClient = (*Client)(nil) - -// DescribeRepositoriesPaginatorOptions is the paginator options for -// DescribeRepositories -type DescribeRepositoriesPaginatorOptions struct { - // The maximum number of repository results returned by DescribeRepositories in - // paginated output. When this parameter is used, DescribeRepositories only - // returns maxResults results in a single page along with a nextToken response - // element. The remaining results of the initial request can be seen by sending - // another DescribeRepositories request with the returned nextToken value. This - // value can be between 1 and 1000. If this parameter is not used, then - // DescribeRepositories returns up to 100 results and a nextToken value, if - // applicable. This option cannot be used when you specify repositories with - // repositoryNames . 
- Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// DescribeRepositoriesPaginator is a paginator for DescribeRepositories -type DescribeRepositoriesPaginator struct { - options DescribeRepositoriesPaginatorOptions - client DescribeRepositoriesAPIClient - params *DescribeRepositoriesInput - nextToken *string - firstPage bool -} - -// NewDescribeRepositoriesPaginator returns a new DescribeRepositoriesPaginator -func NewDescribeRepositoriesPaginator(client DescribeRepositoriesAPIClient, params *DescribeRepositoriesInput, optFns ...func(*DescribeRepositoriesPaginatorOptions)) *DescribeRepositoriesPaginator { - if params == nil { - params = &DescribeRepositoriesInput{} - } - - options := DescribeRepositoriesPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &DescribeRepositoriesPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *DescribeRepositoriesPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next DescribeRepositories page. -func (p *DescribeRepositoriesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeRepositoriesOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - result, err := p.client.DescribeRepositories(ctx, &params, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func newServiceMetadataMiddleware_opDescribeRepositories(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DescribeRepositories", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go deleted file mode 100644 index 842f8768b1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go +++ /dev/null @@ -1,146 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Retrieves an authorization token. An authorization token represents your IAM -// authentication credentials and can be used to access any Amazon ECR registry -// that your IAM principal has access to. The authorization token is valid for 12 -// hours.
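The comment below explains that the returned token is base64-encoded and fed to a registry login. A minimal sketch of that decoding, reusing the client and ctx from the sketch above; the helper name is hypothetical, and "AWS:<password>" is the documented shape of the decoded token.

    // dockerCredentials exchanges the caller's IAM credentials for the
    // username/password pair a registry login expects. Production code
    // should cache the result and honor AuthorizationData.ExpiresAt.
    func dockerCredentials(ctx context.Context, client *ecr.Client) (string, string, error) {
        out, err := client.GetAuthorizationToken(ctx, &ecr.GetAuthorizationTokenInput{})
        if err != nil {
            return "", "", err
        }
        if len(out.AuthorizationData) == 0 || out.AuthorizationData[0].AuthorizationToken == nil {
            return "", "", fmt.Errorf("no authorization data returned")
        }
        raw, err := base64.StdEncoding.DecodeString(*out.AuthorizationData[0].AuthorizationToken)
        if err != nil {
            return "", "", err
        }
        user, pass, ok := strings.Cut(string(raw), ":") // decoded form is "AWS:<password>"
        if !ok {
            return "", "", fmt.Errorf("unexpected token format")
        }
        return user, pass, nil
    }

(Imports assumed: context, encoding/base64, fmt, strings, and the ecr package.)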
The authorizationToken returned is a base64 encoded string that can be -// decoded and used in a docker login command to authenticate to a registry. The -// CLI offers an get-login-password command that simplifies the login process. For -// more information, see Registry authentication (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth) -// in the Amazon Elastic Container Registry User Guide. -func (c *Client) GetAuthorizationToken(ctx context.Context, params *GetAuthorizationTokenInput, optFns ...func(*Options)) (*GetAuthorizationTokenOutput, error) { - if params == nil { - params = &GetAuthorizationTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetAuthorizationToken", params, optFns, c.addOperationGetAuthorizationTokenMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetAuthorizationTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetAuthorizationTokenInput struct { - - // A list of Amazon Web Services account IDs that are associated with the - // registries for which to get AuthorizationData objects. If you do not specify a - // registry, the default registry is assumed. - // - // Deprecated: This field is deprecated. The returned authorization token can be - // used to access any Amazon ECR registry that the IAM principal has access to, - // specifying a registry ID doesn't change the permissions scope of the - // authorization token. - RegistryIds []string - - noSmithyDocumentSerde -} - -type GetAuthorizationTokenOutput struct { - - // A list of authorization token data objects that correspond to the registryIds - // values in the request. - AuthorizationData []types.AuthorizationData - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetAuthorizationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetAuthorizationToken{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetAuthorizationToken{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetAuthorizationToken"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err 
= stack.Initialize.Add(newServiceMetadataMiddleware_opGetAuthorizationToken(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetAuthorizationToken(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetAuthorizationToken", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go deleted file mode 100644 index 80808c7400..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Retrieves the pre-signed Amazon S3 download URL corresponding to an image -// layer. You can only get URLs for image layers that are referenced in an image. -// When an image is pulled, the GetDownloadUrlForLayer API is called once per image -// layer that is not already cached. This operation is used by the Amazon ECR proxy -// and is not generally used by customers for pulling and pushing images. In most -// cases, you should use the docker CLI to pull, tag, and push images. -func (c *Client) GetDownloadUrlForLayer(ctx context.Context, params *GetDownloadUrlForLayerInput, optFns ...func(*Options)) (*GetDownloadUrlForLayerOutput, error) { - if params == nil { - params = &GetDownloadUrlForLayerInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetDownloadUrlForLayer", params, optFns, c.addOperationGetDownloadUrlForLayerMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetDownloadUrlForLayerOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetDownloadUrlForLayerInput struct { - - // The digest of the image layer to download. - // - // This member is required. - LayerDigest *string - - // The name of the repository that is associated with the image layer to download. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry that contains - // the image layer to download. If you do not specify a registry, the default - // registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type GetDownloadUrlForLayerOutput struct { - - // The pre-signed Amazon S3 download URL for the requested layer. - DownloadUrl *string - - // The digest of the image layer to download. - LayerDigest *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetDownloadUrlForLayerMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetDownloadUrlForLayer{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetDownloadUrlForLayer{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetDownloadUrlForLayer"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpGetDownloadUrlForLayerValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetDownloadUrlForLayer(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetDownloadUrlForLayer(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetDownloadUrlForLayer", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go deleted file mode 100644 index 158119f9b9..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go +++ /dev/null @@ -1,150 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Retrieves the lifecycle policy for the specified repository. 
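Calling the operation is a single round trip; a minimal sketch, assuming the client and ctx from the earlier sketches and a placeholder repository name:

    out, err := client.GetLifecyclePolicy(ctx, &ecr.GetLifecyclePolicyInput{
        RepositoryName: aws.String("my-repo"), // placeholder
    })
    if err != nil {
        // A repository with no policy configured surfaces a
        // LifecyclePolicyNotFoundException.
        log.Fatal(err)
    }
    fmt.Println(*out.LifecyclePolicyText) // the JSON policy document

aws.String is the pointer helper from github.com/aws/aws-sdk-go-v2/aws.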
-func (c *Client) GetLifecyclePolicy(ctx context.Context, params *GetLifecyclePolicyInput, optFns ...func(*Options)) (*GetLifecyclePolicyOutput, error) { - if params == nil { - params = &GetLifecyclePolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetLifecyclePolicy", params, optFns, c.addOperationGetLifecyclePolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetLifecyclePolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetLifecyclePolicyInput struct { - - // The name of the repository. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository. If you do not specify a registry, the default registry is - // assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type GetLifecyclePolicyOutput struct { - - // The time stamp of the last time that the lifecycle policy was run. - LastEvaluatedAt *time.Time - - // The JSON lifecycle policy text. - LifecyclePolicyText *string - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetLifecyclePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetLifecyclePolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetLifecyclePolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetLifecyclePolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpGetLifecyclePolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetLifecyclePolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, 
options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetLifecyclePolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetLifecyclePolicy", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go deleted file mode 100644 index c88325e08c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go +++ /dev/null @@ -1,489 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithytime "github.com/aws/smithy-go/time" - smithyhttp "github.com/aws/smithy-go/transport/http" - smithywaiter "github.com/aws/smithy-go/waiter" - "github.com/jmespath/go-jmespath" - "time" -) - -// Retrieves the results of the lifecycle policy preview request for the specified -// repository. -func (c *Client) GetLifecyclePolicyPreview(ctx context.Context, params *GetLifecyclePolicyPreviewInput, optFns ...func(*Options)) (*GetLifecyclePolicyPreviewOutput, error) { - if params == nil { - params = &GetLifecyclePolicyPreviewInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetLifecyclePolicyPreview", params, optFns, c.addOperationGetLifecyclePolicyPreviewMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetLifecyclePolicyPreviewOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetLifecyclePolicyPreviewInput struct { - - // The name of the repository. - // - // This member is required. - RepositoryName *string - - // An optional parameter that filters results based on image tag status and all - // tags, if tagged. - Filter *types.LifecyclePolicyPreviewFilter - - // The list of imageIDs to be included. - ImageIds []types.ImageIdentifier - - // The maximum number of repository results returned by - // GetLifecyclePolicyPreviewRequest in
paginated output. When this parameter is - // used, GetLifecyclePolicyPreviewRequest only returns maxResults results in a - // single page along with a nextToken response element. The remaining results of - // the initial request can be seen by sending another - // GetLifecyclePolicyPreviewRequest request with the returned nextToken value. - // This value can be between 1 and 1000. If this parameter is not used, then - // GetLifecyclePolicyPreviewRequest returns up to 100 results and a nextToken - // value, if applicable. This option cannot be used when you specify images with - // imageIds . - MaxResults *int32 - - // The nextToken value returned from a previous paginated - // GetLifecyclePolicyPreviewRequest request where maxResults was used and the - // results exceeded the value of that parameter. Pagination continues from the end - // of the previous results that returned the nextToken value. This value is
 null - // when there are no more results to return. This option cannot be used when you - // specify images with imageIds . - NextToken *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository. If you do not specify a registry, the default registry is - // assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type GetLifecyclePolicyPreviewOutput struct { - - // The JSON lifecycle policy text. - LifecyclePolicyText *string - - // The nextToken value to include in a future GetLifecyclePolicyPreview request. - // When the results of a GetLifecyclePolicyPreview request exceed maxResults , this - // value can be used to retrieve the next page of results. This value is null when - // there are no more results to return. - NextToken *string - - // The results of the lifecycle policy preview request. - PreviewResults []types.LifecyclePolicyPreviewResult - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // The status of the lifecycle policy preview request. - Status types.LifecyclePolicyPreviewStatus - - // The list of images that is returned as a result of the action. - Summary *types.LifecyclePolicyPreviewSummary - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetLifecyclePolicyPreviewMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetLifecyclePolicyPreview{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetLifecyclePolicyPreview{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetLifecyclePolicyPreview"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpGetLifecyclePolicyPreviewValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetLifecyclePolicyPreview(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = 
addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -// GetLifecyclePolicyPreviewAPIClient is a client that implements the -// GetLifecyclePolicyPreview operation. -type GetLifecyclePolicyPreviewAPIClient interface { - GetLifecyclePolicyPreview(context.Context, *GetLifecyclePolicyPreviewInput, ...func(*Options)) (*GetLifecyclePolicyPreviewOutput, error) -} - -var _ GetLifecyclePolicyPreviewAPIClient = (*Client)(nil) - -// GetLifecyclePolicyPreviewPaginatorOptions is the paginator options for -// GetLifecyclePolicyPreview -type GetLifecyclePolicyPreviewPaginatorOptions struct { - // The maximum number of repository results returned by - // GetLifecyclePolicyPreviewRequest in
paginated output. When this parameter is - // used, GetLifecyclePolicyPreviewRequest only returns maxResults results in a - // single page along with a nextToken response element. The remaining results of - // the initial request can be seen by sending another - // GetLifecyclePolicyPreviewRequest request with the returned nextToken value. - // This value can be between 1 and 1000. If this parameter is not used, then - // GetLifecyclePolicyPreviewRequest returns up to 100 results and a nextToken - // value, if applicable. This option cannot be used when you specify images with - // imageIds . - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// GetLifecyclePolicyPreviewPaginator is a paginator for GetLifecyclePolicyPreview -type GetLifecyclePolicyPreviewPaginator struct { - options GetLifecyclePolicyPreviewPaginatorOptions - client GetLifecyclePolicyPreviewAPIClient - params *GetLifecyclePolicyPreviewInput - nextToken *string - firstPage bool -} - -// NewGetLifecyclePolicyPreviewPaginator returns a new -// GetLifecyclePolicyPreviewPaginator -func NewGetLifecyclePolicyPreviewPaginator(client GetLifecyclePolicyPreviewAPIClient, params *GetLifecyclePolicyPreviewInput, optFns ...func(*GetLifecyclePolicyPreviewPaginatorOptions)) *GetLifecyclePolicyPreviewPaginator { - if params == nil { - params = &GetLifecyclePolicyPreviewInput{} - } - - options := GetLifecyclePolicyPreviewPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &GetLifecyclePolicyPreviewPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *GetLifecyclePolicyPreviewPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next GetLifecyclePolicyPreview page. -func (p *GetLifecyclePolicyPreviewPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*GetLifecyclePolicyPreviewOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - result, err := p.client.GetLifecyclePolicyPreview(ctx, &params, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -// LifecyclePolicyPreviewCompleteWaiterOptions are waiter options for -// LifecyclePolicyPreviewCompleteWaiter -type LifecyclePolicyPreviewCompleteWaiterOptions struct { - - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - // - // Passing options here is functionally equivalent to passing values to this - // config's ClientOptions field that extend the inner client's APIOptions directly. - APIOptions []func(*middleware.Stack) error - - // Functional options to be passed to all operations invoked by this client. - // - // Function values that modify the inner APIOptions are applied after the waiter - // config's own APIOptions modifiers. - ClientOptions []func(*Options) - - // MinDelay is the minimum amount of time to delay between retries. If unset, - // LifecyclePolicyPreviewCompleteWaiter will use default minimum delay of 5 - // seconds. Note that MinDelay must resolve to a value lesser than or equal to the - // MaxDelay.
- MinDelay time.Duration - - // MaxDelay is the maximum amount of time to delay between retries. If unset or - // set to zero, LifecyclePolicyPreviewCompleteWaiter will use default max delay of - // 120 seconds. Note that MaxDelay must resolve to value greater than or equal to - // the MinDelay. - MaxDelay time.Duration - - // LogWaitAttempts is used to enable logging for waiter retry attempts - LogWaitAttempts bool - - // Retryable is function that can be used to override the service defined - // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. - Retryable func(context.Context, *GetLifecyclePolicyPreviewInput, *GetLifecyclePolicyPreviewOutput, error) (bool, error) -} - -// LifecyclePolicyPreviewCompleteWaiter defines the waiters for -// LifecyclePolicyPreviewComplete -type LifecyclePolicyPreviewCompleteWaiter struct { - client GetLifecyclePolicyPreviewAPIClient - - options LifecyclePolicyPreviewCompleteWaiterOptions -} - -// NewLifecyclePolicyPreviewCompleteWaiter constructs a -// LifecyclePolicyPreviewCompleteWaiter. -func NewLifecyclePolicyPreviewCompleteWaiter(client GetLifecyclePolicyPreviewAPIClient, optFns ...func(*LifecyclePolicyPreviewCompleteWaiterOptions)) *LifecyclePolicyPreviewCompleteWaiter { - options := LifecyclePolicyPreviewCompleteWaiterOptions{} - options.MinDelay = 5 * time.Second - options.MaxDelay = 120 * time.Second - options.Retryable = lifecyclePolicyPreviewCompleteStateRetryable - - for _, fn := range optFns { - fn(&options) - } - return &LifecyclePolicyPreviewCompleteWaiter{ - client: client, - options: options, - } -} - -// Wait calls the waiter function for LifecyclePolicyPreviewComplete waiter. The -// maxWaitDur is the maximum wait duration the waiter will wait. The maxWaitDur is -// required and must be greater than zero. -func (w *LifecyclePolicyPreviewCompleteWaiter) Wait(ctx context.Context, params *GetLifecyclePolicyPreviewInput, maxWaitDur time.Duration, optFns ...func(*LifecyclePolicyPreviewCompleteWaiterOptions)) error { - _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) - return err -} - -// WaitForOutput calls the waiter function for LifecyclePolicyPreviewComplete -// waiter and returns the output of the successful operation. The maxWaitDur is the -// maximum wait duration the waiter will wait. The maxWaitDur is required and must -// be greater than zero. 
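Tying the waiter to the operation, a minimal sketch; the repository name and five-minute budget are illustrative, and kicking off the preview (StartLifecyclePolicyPreview) happens elsewhere and is not part of this hunk:

    waiter := ecr.NewLifecyclePolicyPreviewCompleteWaiter(client, func(o *ecr.LifecyclePolicyPreviewCompleteWaiterOptions) {
        o.LogWaitAttempts = true // log each poll while tuning
    })
    // Polls GetLifecyclePolicyPreview with exponential backoff between
    // MinDelay (default 5s) and MaxDelay (default 120s) until the status
    // reaches COMPLETE, fails on FAILED, or the budget runs out.
    preview, err := waiter.WaitForOutput(ctx, &ecr.GetLifecyclePolicyPreviewInput{
        RepositoryName: aws.String("my-repo"),
    }, 5*time.Minute)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(preview.Status, len(preview.PreviewResults))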
-func (w *LifecyclePolicyPreviewCompleteWaiter) WaitForOutput(ctx context.Context, params *GetLifecyclePolicyPreviewInput, maxWaitDur time.Duration, optFns ...func(*LifecyclePolicyPreviewCompleteWaiterOptions)) (*GetLifecyclePolicyPreviewOutput, error) { - if maxWaitDur <= 0 { - return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") - } - - options := w.options - for _, fn := range optFns { - fn(&options) - } - - if options.MaxDelay <= 0 { - options.MaxDelay = 120 * time.Second - } - - if options.MinDelay > options.MaxDelay { - return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) - } - - ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) - defer cancelFn() - - logger := smithywaiter.Logger{} - remainingTime := maxWaitDur - - var attempt int64 - for { - - attempt++ - apiOptions := options.APIOptions - start := time.Now() - - if options.LogWaitAttempts { - logger.Attempt = attempt - apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) - apiOptions = append(apiOptions, logger.AddLogger) - } - - out, err := w.client.GetLifecyclePolicyPreview(ctx, params, func(o *Options) { - o.APIOptions = append(o.APIOptions, apiOptions...) - for _, opt := range options.ClientOptions { - opt(o) - } - }) - - retryable, err := options.Retryable(ctx, params, out, err) - if err != nil { - return nil, err - } - if !retryable { - return out, nil - } - - remainingTime -= time.Since(start) - if remainingTime < options.MinDelay || remainingTime <= 0 { - break - } - - // compute exponential backoff between waiter retries - delay, err := smithywaiter.ComputeDelay( - attempt, options.MinDelay, options.MaxDelay, remainingTime, - ) - if err != nil { - return nil, fmt.Errorf("error computing waiter delay, %w", err) - } - - remainingTime -= delay - // sleep for the delay amount before invoking a request - if err := smithytime.SleepWithContext(ctx, delay); err != nil { - return nil, fmt.Errorf("request cancelled while waiting, %w", err) - } - } - return nil, fmt.Errorf("exceeded max wait time for LifecyclePolicyPreviewComplete waiter") -} - -func lifecyclePolicyPreviewCompleteStateRetryable(ctx context.Context, input *GetLifecyclePolicyPreviewInput, output *GetLifecyclePolicyPreviewOutput, err error) (bool, error) { - - if err == nil { - pathValue, err := jmespath.Search("status", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) - } - - expectedValue := "COMPLETE" - value, ok := pathValue.(types.LifecyclePolicyPreviewStatus) - if !ok { - return false, fmt.Errorf("waiter comparator expected types.LifecyclePolicyPreviewStatus value, got %T", pathValue) - } - - if string(value) == expectedValue { - return false, nil - } - } - - if err == nil { - pathValue, err := jmespath.Search("status", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) - } - - expectedValue := "FAILED" - value, ok := pathValue.(types.LifecyclePolicyPreviewStatus) - if !ok { - return false, fmt.Errorf("waiter comparator expected types.LifecyclePolicyPreviewStatus value, got %T", pathValue) - } - - if string(value) == expectedValue { - return false, fmt.Errorf("waiter state transitioned to Failure") - } - } - - return true, nil -} - -func newServiceMetadataMiddleware_opGetLifecyclePolicyPreview(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: 
ServiceID, - OperationName: "GetLifecyclePolicyPreview", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go deleted file mode 100644 index 9e66e19217..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go +++ /dev/null @@ -1,129 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Retrieves the permissions policy for a registry. -func (c *Client) GetRegistryPolicy(ctx context.Context, params *GetRegistryPolicyInput, optFns ...func(*Options)) (*GetRegistryPolicyOutput, error) { - if params == nil { - params = &GetRegistryPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetRegistryPolicy", params, optFns, c.addOperationGetRegistryPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetRegistryPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetRegistryPolicyInput struct { - noSmithyDocumentSerde -} - -type GetRegistryPolicyOutput struct { - - // The JSON text of the permissions policy for a registry. - PolicyText *string - - // The ID of the registry. - RegistryId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetRegistryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetRegistryPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetRegistryPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetRegistryPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRegistryPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = 
addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetRegistryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetRegistryPolicy", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go deleted file mode 100644 index ae28541948..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go +++ /dev/null @@ -1,130 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Retrieves the scanning configuration for a registry. -func (c *Client) GetRegistryScanningConfiguration(ctx context.Context, params *GetRegistryScanningConfigurationInput, optFns ...func(*Options)) (*GetRegistryScanningConfigurationOutput, error) { - if params == nil { - params = &GetRegistryScanningConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetRegistryScanningConfiguration", params, optFns, c.addOperationGetRegistryScanningConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetRegistryScanningConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetRegistryScanningConfigurationInput struct { - noSmithyDocumentSerde -} - -type GetRegistryScanningConfigurationOutput struct { - - // The ID of the registry. - RegistryId *string - - // The scanning configuration for the registry. - ScanningConfiguration *types.RegistryScanningConfiguration - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetRegistryScanningConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetRegistryScanningConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetRegistryScanningConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetRegistryScanningConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRegistryScanningConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetRegistryScanningConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetRegistryScanningConfiguration", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go deleted file mode 100644 index c74d9e684d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go +++ /dev/null @@ -1,146 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Retrieves the repository policy for the specified repository. 
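Usage mirrors GetLifecyclePolicy above; a minimal sketch with a placeholder repository name:

    out, err := client.GetRepositoryPolicy(ctx, &ecr.GetRepositoryPolicyInput{
        RepositoryName: aws.String("my-repo"), // placeholder
    })
    if err != nil {
        // Repositories without a policy surface a RepositoryPolicyNotFoundException.
        log.Fatal(err)
    }
    fmt.Println(*out.PolicyText) // IAM-style JSON policy document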
-func (c *Client) GetRepositoryPolicy(ctx context.Context, params *GetRepositoryPolicyInput, optFns ...func(*Options)) (*GetRepositoryPolicyOutput, error) { - if params == nil { - params = &GetRepositoryPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetRepositoryPolicy", params, optFns, c.addOperationGetRepositoryPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetRepositoryPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetRepositoryPolicyInput struct { - - // The name of the repository with the policy to retrieve. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository. If you do not specify a registry, the default registry is - // assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type GetRepositoryPolicyOutput struct { - - // The JSON repository policy text associated with the repository. - PolicyText *string - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetRepositoryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetRepositoryPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetRepositoryPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetRepositoryPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpGetRepositoryPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRepositoryPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetRepositoryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetRepositoryPolicy", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go deleted file mode 100644 index 678763f4dd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go +++ /dev/null @@ -1,150 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Notifies Amazon ECR that you intend to upload an image layer. When an image is -// pushed, the InitiateLayerUpload API is called once per image layer that has not -// already been uploaded. Whether or not an image layer has been uploaded is -// determined by the BatchCheckLayerAvailability API action. This operation is used -// by the Amazon ECR proxy and is not generally used by customers for pulling and -// pushing images. In most cases, you should use the docker CLI to pull, tag, and -// push images. -func (c *Client) InitiateLayerUpload(ctx context.Context, params *InitiateLayerUploadInput, optFns ...func(*Options)) (*InitiateLayerUploadOutput, error) { - if params == nil { - params = &InitiateLayerUploadInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "InitiateLayerUpload", params, optFns, c.addOperationInitiateLayerUploadMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*InitiateLayerUploadOutput) - out.ResultMetadata = metadata - return out, nil -} - -type InitiateLayerUploadInput struct { - - // The name of the repository to which you intend to upload layers. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry to which you - // intend to upload layers. If you do not specify a registry, the default registry - // is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type InitiateLayerUploadOutput struct { - - // The size, in bytes, that Amazon ECR expects future layer part uploads to be. - PartSize *int64 - - // The upload ID for the layer upload. This parameter is passed to further - // UploadLayerPart and CompleteLayerUpload operations. - UploadId *string - - // Metadata pertaining to the operation's result. 
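// A hedged sketch of the push-side handshake that the InitiateLayerUpload doc
// comment above describes: the call returns an upload ID and the expected part
// size for subsequent UploadLayerPart calls. The repository name is a
// hypothetical placeholder.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

// beginLayerUpload starts a layer upload and returns the handle needed for
// the follow-up part uploads.
func beginLayerUpload(ctx context.Context, client *ecr.Client) (uploadID string, partSize int64, err error) {
	out, err := client.InitiateLayerUpload(ctx, &ecr.InitiateLayerUploadInput{
		RepositoryName: aws.String("my-repo"), // hypothetical repository name
	})
	if err != nil {
		return "", 0, err
	}
	return aws.ToString(out.UploadId), aws.ToInt64(out.PartSize), nil
}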
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationInitiateLayerUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpInitiateLayerUpload{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpInitiateLayerUpload{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "InitiateLayerUpload"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpInitiateLayerUploadValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opInitiateLayerUpload(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opInitiateLayerUpload(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "InitiateLayerUpload", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go deleted file mode 100644 index 95a1ce2ca3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go +++ /dev/null @@ -1,267 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Lists all the image IDs for the specified repository. 
You can filter images -// based on whether or not they are tagged by using the tagStatus filter and -// specifying either TAGGED , UNTAGGED or ANY . For example, you can filter your -// results to return only UNTAGGED images and then pipe that result to a -// BatchDeleteImage operation to delete them. Or, you can filter your results to -// return only TAGGED images to list all of the tags in your repository. -func (c *Client) ListImages(ctx context.Context, params *ListImagesInput, optFns ...func(*Options)) (*ListImagesOutput, error) { - if params == nil { - params = &ListImagesInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListImages", params, optFns, c.addOperationListImagesMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListImagesOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListImagesInput struct { - - // The repository with image IDs to be listed. - // - // This member is required. - RepositoryName *string - - // The filter key and value with which to filter your ListImages results. - Filter *types.ListImagesFilter - - // The maximum number of image results returned by ListImages in paginated output. - // When this parameter is used, ListImages only returns maxResults results in a - // single page along with a nextToken response element. The remaining results of - // the initial request can be seen by sending another ListImages request with the - // returned nextToken value. This value can be between 1 and 1000. If this - // parameter is not used, then ListImages returns up to 100 results and a nextToken - // value, if applicable. - MaxResults *int32 - - // The nextToken value returned from a previous paginated ListImages request where - // maxResults was used and the results exceeded the value of that parameter. - // Pagination continues from the end of the previous results that returned the - // nextToken value. This value is null when there are no more results to return. - // This token should be treated as an opaque identifier that is only used to - // retrieve the next items in a list and not for other programmatic purposes. - NextToken *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository in which to list images. If you do not specify a registry, the - // default registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type ListImagesOutput struct { - - // The list of image IDs for the requested repository. - ImageIds []types.ImageIdentifier - - // The nextToken value to include in a future ListImages request. When the results - // of a ListImages request exceed maxResults , this value can be used to retrieve - // the next page of results. This value is null when there are no more results to - // return. - NextToken *string - - // Metadata pertaining to the operation's result. 
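// A sketch of the tagStatus filtering behaviour described in the ListImages
// doc comment above: restricting results to untagged images via
// types.ListImagesFilter. The repository name is a hypothetical placeholder.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// listUntaggedImages returns the IDs of untagged images, the typical input to
// a follow-up BatchDeleteImage call.
func listUntaggedImages(ctx context.Context, client *ecr.Client) ([]types.ImageIdentifier, error) {
	out, err := client.ListImages(ctx, &ecr.ListImagesInput{
		RepositoryName: aws.String("my-repo"), // hypothetical repository name
		Filter:         &types.ListImagesFilter{TagStatus: types.TagStatusUntagged},
	})
	if err != nil {
		return nil, err
	}
	return out.ImageIds, nil
}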
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListImagesMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpListImages{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListImages{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListImages"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpListImagesValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListImages(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -// ListImagesAPIClient is a client that implements the ListImages operation. -type ListImagesAPIClient interface { - ListImages(context.Context, *ListImagesInput, ...func(*Options)) (*ListImagesOutput, error) -} - -var _ ListImagesAPIClient = (*Client)(nil) - -// ListImagesPaginatorOptions is the paginator options for ListImages -type ListImagesPaginatorOptions struct { - // The maximum number of image results returned by ListImages in paginated output. - // When this parameter is used, ListImages only returns maxResults results in a - // single page along with a nextToken response element. The remaining results of - // the initial request can be seen by sending another ListImages request with the - // returned nextToken value. This value can be between 1 and 1000. If this - // parameter is not used, then ListImages returns up to 100 results and a nextToken - // value, if applicable. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. 
- StopOnDuplicateToken bool -} - -// ListImagesPaginator is a paginator for ListImages -type ListImagesPaginator struct { - options ListImagesPaginatorOptions - client ListImagesAPIClient - params *ListImagesInput - nextToken *string - firstPage bool -} - -// NewListImagesPaginator returns a new ListImagesPaginator -func NewListImagesPaginator(client ListImagesAPIClient, params *ListImagesInput, optFns ...func(*ListImagesPaginatorOptions)) *ListImagesPaginator { - if params == nil { - params = &ListImagesInput{} - } - - options := ListImagesPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListImagesPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListImagesPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListImages page. -func (p *ListImagesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListImagesOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - result, err := p.client.ListImages(ctx, &params, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func newServiceMetadataMiddleware_opListImages(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListImages", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go deleted file mode 100644 index a833645c98..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go +++ /dev/null @@ -1,137 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// List the tags for an Amazon ECR resource. -func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { - if params == nil { - params = &ListTagsForResourceInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListTagsForResource", params, optFns, c.addOperationListTagsForResourceMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListTagsForResourceOutput) - out.ResultMetadata = metadata - return out, nil - } - -type ListTagsForResourceInput struct { - - // The Amazon Resource Name (ARN) that identifies the resource for which to list - // the tags. Currently, the only supported resource is an Amazon ECR repository. - // - // This member is required.
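// A sketch of driving the ListImagesPaginator shown above: the
// HasMorePages/NextPage loop runs until the service stops returning a
// nextToken. The repository name is a hypothetical placeholder.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// allImageIDs pages through every result for a repository and accumulates
// the image identifiers.
func allImageIDs(ctx context.Context, client *ecr.Client) ([]types.ImageIdentifier, error) {
	p := ecr.NewListImagesPaginator(client, &ecr.ListImagesInput{
		RepositoryName: aws.String("my-repo"), // hypothetical repository name
	})
	var ids []types.ImageIdentifier
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		ids = append(ids, page.ImageIds...)
	}
	return ids, nil
}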
- ResourceArn *string - - noSmithyDocumentSerde -} - -type ListTagsForResourceOutput struct { - - // The tags for the resource. - Tags []types.Tag - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpListTagsForResource{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListTagsForResource{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListTagsForResource"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpListTagsForResourceValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsForResource(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opListTagsForResource(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListTagsForResource", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go deleted file mode 100644 index 3bbebb033e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go +++ /dev/null @@ -1,164 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
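// A sketch of the ListTagsForResource operation documented above; per its doc
// comment, the only supported resource is a repository, identified by ARN.
// The ARN here is a hypothetical placeholder.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// repositoryTags returns the resource tags attached to a repository.
func repositoryTags(ctx context.Context, client *ecr.Client) ([]types.Tag, error) {
	out, err := client.ListTagsForResource(ctx, &ecr.ListTagsForResourceInput{
		// Hypothetical repository ARN.
		ResourceArn: aws.String("arn:aws:ecr:us-east-1:123456789012:repository/my-repo"),
	})
	if err != nil {
		return nil, err
	}
	return out.Tags, nil
}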
- -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates or updates the image manifest and tags associated with an image. When -// an image is pushed and all new image layers have been uploaded, the PutImage API -// is called once to create or update the image manifest and the tags associated -// with the image. This operation is used by the Amazon ECR proxy and is not -// generally used by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. -func (c *Client) PutImage(ctx context.Context, params *PutImageInput, optFns ...func(*Options)) (*PutImageOutput, error) { - if params == nil { - params = &PutImageInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutImage", params, optFns, c.addOperationPutImageMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutImageOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutImageInput struct { - - // The image manifest corresponding to the image to be uploaded. - // - // This member is required. - ImageManifest *string - - // The name of the repository in which to put the image. - // - // This member is required. - RepositoryName *string - - // The image digest of the image manifest corresponding to the image. - ImageDigest *string - - // The media type of the image manifest. If you push an image manifest that does - // not contain the mediaType field, you must specify the imageManifestMediaType in - // the request. - ImageManifestMediaType *string - - // The tag to associate with the image. This parameter is required for images that - // use the Docker Image Manifest V2 Schema 2 or Open Container Initiative (OCI) - // formats. - ImageTag *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository in which to put the image. If you do not specify a registry, the - // default registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type PutImageOutput struct { - - // Details of the image uploaded. - Image *types.Image - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutImageMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutImage{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutImage{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutImage"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpPutImageValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutImage(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opPutImage(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutImage", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go deleted file mode 100644 index e8ff335886..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// The PutImageScanningConfiguration API is being deprecated, in favor of -// specifying the image scanning configuration at the registry level. For more -// information, see PutRegistryScanningConfiguration . 
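// A sketch of the manifest-push step the PutImage doc comment above
// describes: once all layers are uploaded, one PutImage call records the
// manifest and its tag. The manifest body, media type, and names are
// hypothetical placeholders.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// pushManifest records an already-serialized OCI manifest under a tag.
func pushManifest(ctx context.Context, client *ecr.Client, manifest string) (*types.Image, error) {
	out, err := client.PutImage(ctx, &ecr.PutImageInput{
		RepositoryName:         aws.String("my-repo"), // hypothetical repository name
		ImageTag:               aws.String("v1.0.0"),  // hypothetical tag
		ImageManifest:          aws.String(manifest),
		ImageManifestMediaType: aws.String("application/vnd.oci.image.manifest.v1+json"),
	})
	if err != nil {
		return nil, err
	}
	return out.Image, nil
}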
Updates the image scanning -// configuration for the specified repository. -func (c *Client) PutImageScanningConfiguration(ctx context.Context, params *PutImageScanningConfigurationInput, optFns ...func(*Options)) (*PutImageScanningConfigurationOutput, error) { - if params == nil { - params = &PutImageScanningConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutImageScanningConfiguration", params, optFns, c.addOperationPutImageScanningConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutImageScanningConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutImageScanningConfigurationInput struct { - - // The image scanning configuration for the repository. This setting determines - // whether images are scanned for known vulnerabilities after being pushed to the - // repository. - // - // This member is required. - ImageScanningConfiguration *types.ImageScanningConfiguration - - // The name of the repository in which to update the image scanning configuration - // setting. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository in which to update the image scanning configuration setting. If - // you do not specify a registry, the default registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type PutImageScanningConfigurationOutput struct { - - // The image scanning configuration setting for the repository. - ImageScanningConfiguration *types.ImageScanningConfiguration - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutImageScanningConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutImageScanningConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutImageScanningConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutImageScanningConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpPutImageScanningConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutImageScanningConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opPutImageScanningConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutImageScanningConfiguration", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go deleted file mode 100644 index 8fde2774f6..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Updates the image tag mutability settings for the specified repository. 
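// A sketch of the per-repository scan-on-push toggle that the
// PutImageScanningConfiguration doc comment above describes (noting its
// deprecation in favour of the registry-level configuration). The repository
// name is a hypothetical placeholder.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// enableScanOnPush turns on vulnerability scanning for newly pushed images.
func enableScanOnPush(ctx context.Context, client *ecr.Client) error {
	_, err := client.PutImageScanningConfiguration(ctx, &ecr.PutImageScanningConfigurationInput{
		RepositoryName: aws.String("my-repo"), // hypothetical repository name
		ImageScanningConfiguration: &types.ImageScanningConfiguration{
			ScanOnPush: true,
		},
	})
	return err
}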
For -// more information, see Image tag mutability (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html) -// in the Amazon Elastic Container Registry User Guide. -func (c *Client) PutImageTagMutability(ctx context.Context, params *PutImageTagMutabilityInput, optFns ...func(*Options)) (*PutImageTagMutabilityOutput, error) { - if params == nil { - params = &PutImageTagMutabilityInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutImageTagMutability", params, optFns, c.addOperationPutImageTagMutabilityMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutImageTagMutabilityOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutImageTagMutabilityInput struct { - - // The tag mutability setting for the repository. If MUTABLE is specified, image - // tags can be overwritten. If IMMUTABLE is specified, all image tags within the - // repository will be immutable which will prevent them from being overwritten. - // - // This member is required. - ImageTagMutability types.ImageTagMutability - - // The name of the repository in which to update the image tag mutability settings. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository in which to update the image tag mutability settings. If you do - // not specify a registry, the default registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type PutImageTagMutabilityOutput struct { - - // The image tag mutability setting for the repository. - ImageTagMutability types.ImageTagMutability - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutImageTagMutabilityMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutImageTagMutability{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutImageTagMutability{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutImageTagMutability"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpPutImageTagMutabilityValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutImageTagMutability(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opPutImageTagMutability(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutImageTagMutability", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go deleted file mode 100644 index 133ddac4ee..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates or updates the lifecycle policy for the specified repository. For more -// information, see Lifecycle policy template (https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html) -// . 
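// A sketch of applying the lifecycle policy described in the
// PutLifecyclePolicy doc comment above: the policy text is a JSON document.
// This rule, the repository name, and the 14-day window are hypothetical
// examples.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

// expireUntagged is a hypothetical lifecycle rule that expires untagged
// images 14 days after they were pushed.
const expireUntagged = `{
  "rules": [{
    "rulePriority": 1,
    "description": "expire untagged images after 14 days",
    "selection": {
      "tagStatus": "untagged",
      "countType": "sinceImagePushed",
      "countUnit": "days",
      "countNumber": 14
    },
    "action": {"type": "expire"}
  }]
}`

func applyLifecyclePolicy(ctx context.Context, client *ecr.Client) error {
	_, err := client.PutLifecyclePolicy(ctx, &ecr.PutLifecyclePolicyInput{
		RepositoryName:      aws.String("my-repo"), // hypothetical repository name
		LifecyclePolicyText: aws.String(expireUntagged),
	})
	return err
}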
-func (c *Client) PutLifecyclePolicy(ctx context.Context, params *PutLifecyclePolicyInput, optFns ...func(*Options)) (*PutLifecyclePolicyOutput, error) { - if params == nil { - params = &PutLifecyclePolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutLifecyclePolicy", params, optFns, c.addOperationPutLifecyclePolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutLifecyclePolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutLifecyclePolicyInput struct { - - // The JSON repository policy text to apply to the repository. - // - // This member is required. - LifecyclePolicyText *string - - // The name of the repository to receive the policy. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository. If you do
 not specify a registry, the default registry is - // assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type PutLifecyclePolicyOutput struct { - - // The JSON repository policy text. - LifecyclePolicyText *string - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutLifecyclePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutLifecyclePolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutLifecyclePolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutLifecyclePolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpPutLifecyclePolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutLifecyclePolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opPutLifecyclePolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutLifecyclePolicy", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go deleted file mode 100644 index 92ca78c10f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go +++ /dev/null @@ -1,144 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates or updates the permissions policy for your registry. A registry policy -// is used to specify permissions for another Amazon Web Services account and is -// used when configuring cross-account replication. For more information, see -// Registry permissions (https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html) -// in the Amazon Elastic Container Registry User Guide. -func (c *Client) PutRegistryPolicy(ctx context.Context, params *PutRegistryPolicyInput, optFns ...func(*Options)) (*PutRegistryPolicyOutput, error) { - if params == nil { - params = &PutRegistryPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutRegistryPolicy", params, optFns, c.addOperationPutRegistryPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutRegistryPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutRegistryPolicyInput struct { - - // The JSON policy text to apply to your registry. The policy text follows the - // same format as IAM policy text. For more information, see Registry permissions (https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html) - // in the Amazon Elastic Container Registry User Guide. - // - // This member is required. - PolicyText *string - - noSmithyDocumentSerde -} - -type PutRegistryPolicyOutput struct { - - // The JSON policy text for your registry. - PolicyText *string - - // The registry ID. - RegistryId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutRegistryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutRegistryPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutRegistryPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutRegistryPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = 
addOpPutRegistryPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutRegistryPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opPutRegistryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutRegistryPolicy", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go deleted file mode 100644 index 74e4ac2895..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go +++ /dev/null @@ -1,147 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates or updates the scanning configuration for your private registry. -func (c *Client) PutRegistryScanningConfiguration(ctx context.Context, params *PutRegistryScanningConfigurationInput, optFns ...func(*Options)) (*PutRegistryScanningConfigurationOutput, error) { - if params == nil { - params = &PutRegistryScanningConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutRegistryScanningConfiguration", params, optFns, c.addOperationPutRegistryScanningConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutRegistryScanningConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutRegistryScanningConfigurationInput struct { - - // The scanning rules to use for the registry. A scanning rule is used to - // determine which repository filters are used and at what frequency scanning will - // occur. - Rules []types.RegistryScanningRule - - // The scanning type to set for the registry. When a registry scanning - // configuration is not defined, by default the BASIC scan type is used. When - // basic scanning is used, you may specify filters to determine which individual - // repositories, or all repositories, are scanned when new images are pushed to - // those repositories. Alternatively, you can do manual scans of images with basic - // scanning. When the ENHANCED scan type is set, Amazon Inspector provides - // automated vulnerability scanning. You may choose between continuous scanning or - // scan on push and you may specify filters to determine which individual - // repositories, or all repositories, are scanned. - ScanType types.ScanType - - noSmithyDocumentSerde -} - -type PutRegistryScanningConfigurationOutput struct { - - // The scanning configuration for your registry. - RegistryScanningConfiguration *types.RegistryScanningConfiguration - - // Metadata pertaining to the operation's result. 
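// A sketch of the registry-level scanning setup that the
// PutRegistryScanningConfiguration doc comment above describes: ENHANCED
// scanning with a continuous-scan rule. The wildcard filter matching every
// repository is a hypothetical example.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// enableEnhancedScanning switches the registry to Amazon Inspector-backed
// continuous scanning for all repositories.
func enableEnhancedScanning(ctx context.Context, client *ecr.Client) error {
	_, err := client.PutRegistryScanningConfiguration(ctx, &ecr.PutRegistryScanningConfigurationInput{
		ScanType: types.ScanTypeEnhanced,
		Rules: []types.RegistryScanningRule{{
			ScanFrequency: types.ScanFrequencyContinuousScan,
			RepositoryFilters: []types.ScanningRepositoryFilter{{
				Filter:     aws.String("*"), // hypothetical: match every repository
				FilterType: types.ScanningRepositoryFilterTypeWildcard,
			}},
		}},
	})
	return err
}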
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutRegistryScanningConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutRegistryScanningConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutRegistryScanningConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutRegistryScanningConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpPutRegistryScanningConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutRegistryScanningConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opPutRegistryScanningConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutRegistryScanningConfiguration", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go deleted file mode 100644 index 9d5b3cc821..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go +++ /dev/null @@ -1,145 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates or updates the replication configuration for a registry. The existing -// replication configuration for a repository can be retrieved with the -// DescribeRegistry API action. The first time the PutReplicationConfiguration API -// is called, a service-linked IAM role is created in your account for the -// replication process. For more information, see Using service-linked roles for -// Amazon ECR (https://docs.aws.amazon.com/AmazonECR/latest/userguide/using-service-linked-roles.html) -// in the Amazon Elastic Container Registry User Guide. When configuring -// cross-account replication, the destination account must grant the source account -// permission to replicate. This permission is controlled using a registry -// permissions policy. For more information, see PutRegistryPolicy . -func (c *Client) PutReplicationConfiguration(ctx context.Context, params *PutReplicationConfigurationInput, optFns ...func(*Options)) (*PutReplicationConfigurationOutput, error) { - if params == nil { - params = &PutReplicationConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutReplicationConfiguration", params, optFns, c.addOperationPutReplicationConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutReplicationConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutReplicationConfigurationInput struct { - - // An object representing the replication configuration for a registry. - // - // This member is required. - ReplicationConfiguration *types.ReplicationConfiguration - - noSmithyDocumentSerde -} - -type PutReplicationConfigurationOutput struct { - - // The contents of the replication configuration for the registry. - ReplicationConfiguration *types.ReplicationConfiguration - - // Metadata pertaining to the operation's result. 
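// A sketch of the cross-region replication setup that the
// PutReplicationConfiguration doc comment above describes: one rule with a
// single destination. The region and account ID are hypothetical
// placeholders.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// replicateToWest replicates pushed images to a second region in the same
// (hypothetical) account.
func replicateToWest(ctx context.Context, client *ecr.Client) error {
	_, err := client.PutReplicationConfiguration(ctx, &ecr.PutReplicationConfigurationInput{
		ReplicationConfiguration: &types.ReplicationConfiguration{
			Rules: []types.ReplicationRule{{
				Destinations: []types.ReplicationDestination{{
					Region:     aws.String("us-west-2"),    // hypothetical region
					RegistryId: aws.String("123456789012"), // hypothetical account ID
				}},
			}},
		},
	})
	return err
}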
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutReplicationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutReplicationConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutReplicationConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutReplicationConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpPutReplicationConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutReplicationConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opPutReplicationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutReplicationConfiguration", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go deleted file mode 100644 index 62a700b2ad..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go +++ /dev/null @@ -1,161 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Applies a repository policy to the specified repository to control access -// permissions. 
For more information, see Amazon ECR Repository policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html) -// in the Amazon Elastic Container Registry User Guide. -func (c *Client) SetRepositoryPolicy(ctx context.Context, params *SetRepositoryPolicyInput, optFns ...func(*Options)) (*SetRepositoryPolicyOutput, error) { - if params == nil { - params = &SetRepositoryPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "SetRepositoryPolicy", params, optFns, c.addOperationSetRepositoryPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*SetRepositoryPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type SetRepositoryPolicyInput struct { - - // The JSON repository policy text to apply to the repository. For more - // information, see Amazon ECR repository policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) - // in the Amazon Elastic Container Registry User Guide. - // - // This member is required. - PolicyText *string - - // The name of the repository to receive the policy. - // - // This member is required. - RepositoryName *string - - // If the policy you are attempting to set on a repository policy would prevent - // you from setting another policy in the future, you must force the - // SetRepositoryPolicy operation. This is intended to prevent accidental repository - // lock outs. - Force bool - - // The Amazon Web Services account ID associated with the registry that contains - // the repository. If you do not specify a registry, the default registry is - // assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type SetRepositoryPolicyOutput struct { - - // The JSON repository policy text applied to the repository. - PolicyText *string - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. 
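// A sketch of attaching the repository policy that the SetRepositoryPolicy
// doc comment above describes. The policy document, which grants pull access
// to a second account, is a hypothetical example.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

// pullPolicy is a hypothetical policy allowing another account to pull images.
const pullPolicy = `{
  "Version": "2012-10-17",
  "Statement": [{
    "Sid": "AllowPull",
    "Effect": "Allow",
    "Principal": {"AWS": "arn:aws:iam::210987654321:root"},
    "Action": ["ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage"]
  }]
}`

func grantPull(ctx context.Context, client *ecr.Client) error {
	_, err := client.SetRepositoryPolicy(ctx, &ecr.SetRepositoryPolicyInput{
		RepositoryName: aws.String("my-repo"), // hypothetical repository name
		PolicyText:     aws.String(pullPolicy),
	})
	return err
}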
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationSetRepositoryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpSetRepositoryPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpSetRepositoryPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "SetRepositoryPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpSetRepositoryPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSetRepositoryPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opSetRepositoryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "SetRepositoryPolicy", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go deleted file mode 100644 index 1a2cd6eb72..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Starts an image vulnerability scan. An image scan can only be started once per -// 24 hours on an individual image. This limit includes any scan performed on -// initial push.
For more information, see Image scanning (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html) -// in the Amazon Elastic Container Registry User Guide. -func (c *Client) StartImageScan(ctx context.Context, params *StartImageScanInput, optFns ...func(*Options)) (*StartImageScanOutput, error) { - if params == nil { - params = &StartImageScanInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "StartImageScan", params, optFns, c.addOperationStartImageScanMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*StartImageScanOutput) - out.ResultMetadata = metadata - return out, nil -} - -type StartImageScanInput struct { - - // An object with identifying information for an image in an Amazon ECR repository. - // - // This member is required. - ImageId *types.ImageIdentifier - - // The name of the repository that contains the images to scan. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository in which to start an image scan request. If you do not specify a - // registry, the default registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type StartImageScanOutput struct { - - // An object with identifying information for an image in an Amazon ECR repository. - ImageId *types.ImageIdentifier - - // The current state of the scan. - ImageScanStatus *types.ImageScanStatus - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationStartImageScanMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartImageScan{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartImageScan{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "StartImageScan"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpStartImageScanValidationMiddleware(stack); err != nil { - return err - } - if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opStartImageScan(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opStartImageScan(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "StartImageScan", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go deleted file mode 100644 index 95855b0b97..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Starts a preview of a lifecycle policy for the specified repository. This -// allows you to see the results before associating the lifecycle policy with the -// repository. -func (c *Client) StartLifecyclePolicyPreview(ctx context.Context, params *StartLifecyclePolicyPreviewInput, optFns ...func(*Options)) (*StartLifecyclePolicyPreviewOutput, error) { - if params == nil { - params = &StartLifecyclePolicyPreviewInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "StartLifecyclePolicyPreview", params, optFns, c.addOperationStartLifecyclePolicyPreviewMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*StartLifecyclePolicyPreviewOutput) - out.ResultMetadata = metadata - return out, nil -} - -type StartLifecyclePolicyPreviewInput struct { - - // The name of the repository to be evaluated. - // - // This member is required. - RepositoryName *string - - // The policy to be evaluated against. If you do not specify a policy, the current - // policy for the repository is used. - LifecyclePolicyText *string - - // The Amazon Web Services account ID associated with the registry that contains - // the repository. If you do not specify a registry, the default registry is - // assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type StartLifecyclePolicyPreviewOutput struct { - - // The JSON repository policy text. - LifecyclePolicyText *string - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // The status of the lifecycle policy preview request. - Status types.LifecyclePolicyPreviewStatus - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationStartLifecyclePolicyPreviewMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartLifecyclePolicyPreview{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartLifecyclePolicyPreview{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "StartLifecyclePolicyPreview"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpStartLifecyclePolicyPreviewValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartLifecyclePolicyPreview(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opStartLifecyclePolicyPreview(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "StartLifecyclePolicyPreview", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go deleted file mode 100644 index 683bc667e8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go +++ /dev/null @@ -1,141 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Adds specified tags to a resource with the specified ARN. 
Existing tags on a -// resource are not changed if they are not specified in the request parameters. -func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { - if params == nil { - params = &TagResourceInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*TagResourceOutput) - out.ResultMetadata = metadata - return out, nil -} - -type TagResourceInput struct { - - // The Amazon Resource Name (ARN) of the resource to which to add tags. - // Currently, the only supported resource is an Amazon ECR repository. - // - // This member is required. - ResourceArn *string - - // The tags to add to the resource. A tag is an array of key-value pairs. Tag keys - // can have a maximum length of 128 characters, and tag values can have a - // maximum length of 256 characters. - // - // This member is required. - Tags []types.Tag - - noSmithyDocumentSerde -} - -type TagResourceOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpTagResource{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpTagResource{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "TagResource"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpTagResourceValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func 
newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "TagResource", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go deleted file mode 100644 index d1f57f68c5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go +++ /dev/null @@ -1,137 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes specified tags from a resource. -func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { - if params == nil { - params = &UntagResourceInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*UntagResourceOutput) - out.ResultMetadata = metadata - return out, nil -} - -type UntagResourceInput struct { - - // The Amazon Resource Name (ARN) of the resource from which to remove tags. - // Currently, the only supported resource is an Amazon ECR repository. - // - // This member is required. - ResourceArn *string - - // The keys of the tags to be removed. - // - // This member is required. - TagKeys []string - - noSmithyDocumentSerde -} - -type UntagResourceOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpUntagResource{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUntagResource{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "UntagResource"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpUntagResourceValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "UntagResource", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdatePullThroughCacheRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdatePullThroughCacheRule.go deleted file mode 100644 index 7b55a8016a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UpdatePullThroughCacheRule.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Updates an existing pull through cache rule. 
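The UpdatePullThroughCacheRule wrapper in this deleted file still exists in the upstream SDK; only ko's vendored copy is removed. As a minimal, hypothetical usage sketch (assuming the canonical github.com/aws/aws-sdk-go-v2 module paths; the repository prefix and Secrets Manager ARN below are placeholders, not real resources), rotating the credential on an existing rule looks roughly like this:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

func main() {
	ctx := context.Background()
	// Resolve region and credentials from the default chain (env, shared config, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg)

	// Point an existing pull through cache rule at a rotated secret.
	out, err := client.UpdatePullThroughCacheRule(ctx, &ecr.UpdatePullThroughCacheRuleInput{
		EcrRepositoryPrefix: aws.String("ecr-public"),
		CredentialArn:       aws.String("arn:aws:secretsmanager:us-east-1:111122223333:secret:ecr-pullthroughcache/example"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("rule updated at %v", out.UpdatedAt)
}

Leaving RegistryId unset targets the caller's default registry, matching the input documentation below.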
-func (c *Client) UpdatePullThroughCacheRule(ctx context.Context, params *UpdatePullThroughCacheRuleInput, optFns ...func(*Options)) (*UpdatePullThroughCacheRuleOutput, error) { - if params == nil { - params = &UpdatePullThroughCacheRuleInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "UpdatePullThroughCacheRule", params, optFns, c.addOperationUpdatePullThroughCacheRuleMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*UpdatePullThroughCacheRuleOutput) - out.ResultMetadata = metadata - return out, nil -} - -type UpdatePullThroughCacheRuleInput struct { - - // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager - // secret that identifies the credentials to authenticate to the upstream registry. - // - // This member is required. - CredentialArn *string - - // The repository name prefix to use when caching images from the source registry. - // - // This member is required. - EcrRepositoryPrefix *string - - // The Amazon Web Services account ID associated with the registry associated with - // the pull through cache rule. If you do not specify a registry, the default - // registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type UpdatePullThroughCacheRuleOutput struct { - - // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager - // secret associated with the pull through cache rule. - CredentialArn *string - - // The Amazon ECR repository prefix associated with the pull through cache rule. - EcrRepositoryPrefix *string - - // The registry ID associated with the request. - RegistryId *string - - // The date and time, in JavaScript date format, when the pull through cache rule - // was updated. - UpdatedAt *time.Time - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationUpdatePullThroughCacheRuleMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdatePullThroughCacheRule{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdatePullThroughCacheRule{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "UpdatePullThroughCacheRule"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpUpdatePullThroughCacheRuleValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdatePullThroughCacheRule(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opUpdatePullThroughCacheRule(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "UpdatePullThroughCacheRule", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go deleted file mode 100644 index b5160be8d5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go +++ /dev/null @@ -1,175 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Uploads an image layer part to Amazon ECR. When an image is pushed, each new -// image layer is uploaded in parts. 
The maximum size of each image layer part can -// be 20971520 bytes (or about 20MB). The UploadLayerPart API is called once for -// each new image layer part. This operation is used by the Amazon ECR proxy and is -// not generally used by customers for pulling and pushing images. In most cases, -// you should use the docker CLI to pull, tag, and push images. -func (c *Client) UploadLayerPart(ctx context.Context, params *UploadLayerPartInput, optFns ...func(*Options)) (*UploadLayerPartOutput, error) { - if params == nil { - params = &UploadLayerPartInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "UploadLayerPart", params, optFns, c.addOperationUploadLayerPartMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*UploadLayerPartOutput) - out.ResultMetadata = metadata - return out, nil -} - -type UploadLayerPartInput struct { - - // The base64-encoded layer part payload. - // - // This member is required. - LayerPartBlob []byte - - // The position of the first byte of the layer part within the overall image layer. - // - // This member is required. - PartFirstByte *int64 - - // The position of the last byte of the layer part within the overall image layer. - // - // This member is required. - PartLastByte *int64 - - // The name of the repository to which you are uploading layer parts. - // - // This member is required. - RepositoryName *string - - // The upload ID from a previous InitiateLayerUpload operation to associate with - // the layer part upload. - // - // This member is required. - UploadId *string - - // The Amazon Web Services account ID associated with the registry to which you - // are uploading layer parts. If you do not specify a registry, the default - // registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type UploadLayerPartOutput struct { - - // The integer value of the last byte received in the request. - LastByteReceived *int64 - - // The registry ID associated with the request. - RegistryId *string - - // The repository name associated with the request. - RepositoryName *string - - // The upload ID associated with the request. - UploadId *string - - // Metadata pertaining to the operation's result.
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationUploadLayerPartMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpUploadLayerPart{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUploadLayerPart{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "UploadLayerPart"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpUploadLayerPartValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadLayerPart(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opUploadLayerPart(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "UploadLayerPart", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ValidatePullThroughCacheRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ValidatePullThroughCacheRule.go deleted file mode 100644 index 7a97f66605..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ValidatePullThroughCacheRule.go +++ /dev/null @@ -1,163 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Validates an existing pull through cache rule for an upstream registry that -// requires authentication. 
This will retrieve the contents of the Amazon Web -// Services Secrets Manager secret, verify the syntax, and then validate that -// authentication to the upstream registry is successful. -func (c *Client) ValidatePullThroughCacheRule(ctx context.Context, params *ValidatePullThroughCacheRuleInput, optFns ...func(*Options)) (*ValidatePullThroughCacheRuleOutput, error) { - if params == nil { - params = &ValidatePullThroughCacheRuleInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ValidatePullThroughCacheRule", params, optFns, c.addOperationValidatePullThroughCacheRuleMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ValidatePullThroughCacheRuleOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ValidatePullThroughCacheRuleInput struct { - - // The repository name prefix associated with the pull through cache rule. - // - // This member is required. - EcrRepositoryPrefix *string - - // The registry ID associated with the pull through cache rule. If you do not - // specify a registry, the default registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type ValidatePullThroughCacheRuleOutput struct { - - // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager - // secret associated with the pull through cache rule. - CredentialArn *string - - // The Amazon ECR repository prefix associated with the pull through cache rule. - EcrRepositoryPrefix *string - - // The reason the validation failed. For more details about possible causes and - // how to address them, see Using pull through cache rules (https://docs.aws.amazon.com/AmazonECR/latest/userguide/pull-through-cache.html) - // in the Amazon Elastic Container Registry User Guide. - Failure *string - - // Whether or not the pull through cache rule was validated. If true , Amazon ECR - // was able to reach the upstream registry and authentication was successful. If - // false , there was an issue and validation failed. The failure reason indicates - // the cause. - IsValid bool - - // The registry ID associated with the request. - RegistryId *string - - // The upstream registry URL associated with the pull through cache rule. - UpstreamRegistryUrl *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationValidatePullThroughCacheRuleMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpValidatePullThroughCacheRule{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpValidatePullThroughCacheRule{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ValidatePullThroughCacheRule"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpValidatePullThroughCacheRuleValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opValidatePullThroughCacheRule(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opValidatePullThroughCacheRule(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ValidatePullThroughCacheRule", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/auth.go deleted file mode 100644 index 86ba7edf6b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/auth.go +++ /dev/null @@ -1,284 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ecr - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The region in which the operation is being invoked. - Region string -} - -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthParamsRegion(params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. 
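Options.AuthSchemeResolver is exported, so callers can substitute their own resolver. A hedged sketch (assuming the canonical github.com/aws/smithy-go and github.com/aws/aws-sdk-go-v2 module paths, and only the exported names shown in this deleted file): a resolver that always offers a single SigV4 option pinned to a fixed region, wired in via NewFromConfig:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	smithy "github.com/aws/smithy-go"
	smithyauth "github.com/aws/smithy-go/auth"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// pinnedSigV4Resolver mirrors the shape of serviceAuthOptions below, but pins
// the signing region instead of reading it from the resolver parameters.
type pinnedSigV4Resolver struct {
	region string
}

func (r *pinnedSigV4Resolver) ResolveAuthSchemes(_ context.Context, _ *ecr.AuthResolverParameters) ([]*smithyauth.Option, error) {
	var props smithy.Properties
	smithyhttp.SetSigV4SigningName(&props, "ecr")
	smithyhttp.SetSigV4SigningRegion(&props, r.region)
	return []*smithyauth.Option{{
		SchemeID:         smithyauth.SchemeIDSigV4,
		SignerProperties: props,
	}}, nil
}

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// The override only proposes auth options; scheme selection, identity
	// resolution, and signing still run through the middleware in this file.
	_ = ecr.NewFromConfig(cfg, func(o *ecr.Options) {
		o.AuthSchemeResolver = &pinnedSigV4Resolver{region: "us-west-2"}
	})
}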
-type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "ecr") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - for _, option := range options { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { - return &resolvedAuthScheme{ - Scheme: scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := 
getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go deleted file mode 100644 index d4c99343a8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go +++ /dev/null @@ -1,14246 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ecr - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - smithy "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithytime "github.com/aws/smithy-go/time" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "math" - "strings" -) - -type awsAwsjson11_deserializeOpBatchCheckLayerAvailability struct { -} - -func (*awsAwsjson11_deserializeOpBatchCheckLayerAvailability) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpBatchCheckLayerAvailability) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response, &metadata) - } - output := &BatchCheckLayerAvailabilityOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentBatchCheckLayerAvailabilityOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = 
bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpBatchDeleteImage struct { -} - -func (*awsAwsjson11_deserializeOpBatchDeleteImage) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpBatchDeleteImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorBatchDeleteImage(response, &metadata) - } - output := &BatchDeleteImageOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentBatchDeleteImageOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorBatchDeleteImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - 
errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpBatchGetImage struct { -} - -func (*awsAwsjson11_deserializeOpBatchGetImage) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpBatchGetImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorBatchGetImage(response, &metadata) - } - output := &BatchGetImageOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentBatchGetImageOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorBatchGetImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = 
bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("LimitExceededException", errorCode): - return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnableToGetUpstreamImageException", errorCode): - return awsAwsjson11_deserializeErrorUnableToGetUpstreamImageException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpBatchGetRepositoryScanningConfiguration struct { -} - -func (*awsAwsjson11_deserializeOpBatchGetRepositoryScanningConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpBatchGetRepositoryScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorBatchGetRepositoryScanningConfiguration(response, &metadata) - } - output := &BatchGetRepositoryScanningConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentBatchGetRepositoryScanningConfigurationOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorBatchGetRepositoryScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - 
bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpCompleteLayerUpload struct { -} - -func (*awsAwsjson11_deserializeOpCompleteLayerUpload) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpCompleteLayerUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorCompleteLayerUpload(response, &metadata) - } - output := &CompleteLayerUploadOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentCompleteLayerUploadOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorCompleteLayerUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff 
[1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("EmptyUploadException", errorCode): - return awsAwsjson11_deserializeErrorEmptyUploadException(response, errorBody) - - case strings.EqualFold("InvalidLayerException", errorCode): - return awsAwsjson11_deserializeErrorInvalidLayerException(response, errorBody) - - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("KmsException", errorCode): - return awsAwsjson11_deserializeErrorKmsException(response, errorBody) - - case strings.EqualFold("LayerAlreadyExistsException", errorCode): - return awsAwsjson11_deserializeErrorLayerAlreadyExistsException(response, errorBody) - - case strings.EqualFold("LayerPartTooSmallException", errorCode): - return awsAwsjson11_deserializeErrorLayerPartTooSmallException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UploadNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorUploadNotFoundException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpCreatePullThroughCacheRule struct { -} - -func (*awsAwsjson11_deserializeOpCreatePullThroughCacheRule) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpCreatePullThroughCacheRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorCreatePullThroughCacheRule(response, &metadata) - } - output := &CreatePullThroughCacheRuleOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - 
Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentCreatePullThroughCacheRuleOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorCreatePullThroughCacheRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("LimitExceededException", errorCode): - return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) - - case strings.EqualFold("PullThroughCacheRuleAlreadyExistsException", errorCode): - return awsAwsjson11_deserializeErrorPullThroughCacheRuleAlreadyExistsException(response, errorBody) - - case strings.EqualFold("SecretNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorSecretNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnableToAccessSecretException", errorCode): - return awsAwsjson11_deserializeErrorUnableToAccessSecretException(response, errorBody) - - case strings.EqualFold("UnableToDecryptSecretValueException", errorCode): - return awsAwsjson11_deserializeErrorUnableToDecryptSecretValueException(response, errorBody) - - case strings.EqualFold("UnsupportedUpstreamRegistryException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedUpstreamRegistryException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpCreateRepository struct { -} - -func (*awsAwsjson11_deserializeOpCreateRepository) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpCreateRepository) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorCreateRepository(response, &metadata) - } - output := &CreateRepositoryOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentCreateRepositoryOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorCreateRepository(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("InvalidTagParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) - - case strings.EqualFold("KmsException", errorCode): - return awsAwsjson11_deserializeErrorKmsException(response, errorBody) - - case strings.EqualFold("LimitExceededException", errorCode): - return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) - - case strings.EqualFold("RepositoryAlreadyExistsException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryAlreadyExistsException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case 
strings.EqualFold("TooManyTagsException", errorCode): - return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDeleteLifecyclePolicy struct { -} - -func (*awsAwsjson11_deserializeOpDeleteLifecyclePolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDeleteLifecyclePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeleteLifecyclePolicy(response, &metadata) - } - output := &DeleteLifecyclePolicyOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDeleteLifecyclePolicyOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDeleteLifecyclePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("LifecyclePolicyNotFoundException", errorCode): - return 
awsAwsjson11_deserializeErrorLifecyclePolicyNotFoundException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDeletePullThroughCacheRule struct { -} - -func (*awsAwsjson11_deserializeOpDeletePullThroughCacheRule) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDeletePullThroughCacheRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeletePullThroughCacheRule(response, &metadata) - } - output := &DeletePullThroughCacheRuleOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDeletePullThroughCacheRuleOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDeletePullThroughCacheRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, 
bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("PullThroughCacheRuleNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorPullThroughCacheRuleNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDeleteRegistryPolicy struct { -} - -func (*awsAwsjson11_deserializeOpDeleteRegistryPolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDeleteRegistryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeleteRegistryPolicy(response, &metadata) - } - output := &DeleteRegistryPolicyOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDeleteRegistryPolicyOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDeleteRegistryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: 
fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RegistryPolicyNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRegistryPolicyNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDeleteRepository struct { -} - -func (*awsAwsjson11_deserializeOpDeleteRepository) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDeleteRepository) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeleteRepository(response, &metadata) - } - output := &DeleteRepositoryOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDeleteRepositoryOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDeleteRepository(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := 
getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("KmsException", errorCode): - return awsAwsjson11_deserializeErrorKmsException(response, errorBody) - - case strings.EqualFold("RepositoryNotEmptyException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotEmptyException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDeleteRepositoryPolicy struct { -} - -func (*awsAwsjson11_deserializeOpDeleteRepositoryPolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDeleteRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response, &metadata) - } - output := &DeleteRepositoryPolicyOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDeleteRepositoryPolicyOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := 
bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("RepositoryPolicyNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDescribeImageReplicationStatus struct { -} - -func (*awsAwsjson11_deserializeOpDescribeImageReplicationStatus) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDescribeImageReplicationStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeImageReplicationStatus(response, &metadata) - } - output := &DescribeImageReplicationStatusOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDescribeImageReplicationStatusOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDescribeImageReplicationStatus(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("ImageNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) - - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDescribeImages struct { -} - -func (*awsAwsjson11_deserializeOpDescribeImages) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDescribeImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeImages(response, &metadata) - } - output := &DescribeImagesOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDescribeImagesOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - 
io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("ImageNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) - - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDescribeImageScanFindings struct { -} - -func (*awsAwsjson11_deserializeOpDescribeImageScanFindings) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDescribeImageScanFindings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeImageScanFindings(response, &metadata) - } - output := &DescribeImageScanFindingsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - 
Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDescribeImageScanFindingsOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDescribeImageScanFindings(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("ImageNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) - - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ScanNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorScanNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDescribePullThroughCacheRules struct { -} - -func (*awsAwsjson11_deserializeOpDescribePullThroughCacheRules) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDescribePullThroughCacheRules) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribePullThroughCacheRules(response, 
&metadata) - } - output := &DescribePullThroughCacheRulesOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDescribePullThroughCacheRulesOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDescribePullThroughCacheRules(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("PullThroughCacheRuleNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorPullThroughCacheRuleNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDescribeRegistry struct { -} - -func (*awsAwsjson11_deserializeOpDescribeRegistry) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDescribeRegistry) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: 
fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeRegistry(response, &metadata) - } - output := &DescribeRegistryOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDescribeRegistryOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDescribeRegistry(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDescribeRepositories struct { -} - -func (*awsAwsjson11_deserializeOpDescribeRepositories) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDescribeRepositories) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: 
fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeRepositories(response, &metadata) - } - output := &DescribeRepositoriesOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDescribeRepositoriesOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpGetAuthorizationToken struct { -} - -func (*awsAwsjson11_deserializeOpGetAuthorizationToken) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpGetAuthorizationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, 
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response, &metadata)
-	}
-	output := &GetAuthorizationTokenOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentGetAuthorizationTokenOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
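GetAuthorizationToken is the one operation in this hunk that ko exercises directly, via the bumped `amazon-ecr-credential-helper/ecr-login` dependency. The `GetAuthorizationTokenOutput` deserialized above carries a base64 token of the form `AWS:<password>`. A hedged caller-side sketch of how such a credential helper consumes it (not part of the vendored file; the repository name and control flow are illustrative):

```go
package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	out, err := ecr.NewFromConfig(cfg).GetAuthorizationToken(context.TODO(),
		&ecr.GetAuthorizationTokenInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, data := range out.AuthorizationData {
		// The token is base64("AWS:<password>"); split it into the
		// username/password pair a docker credential helper returns.
		raw, err := base64.StdEncoding.DecodeString(*data.AuthorizationToken)
		if err != nil {
			log.Fatal(err)
		}
		user, pass, ok := strings.Cut(string(raw), ":")
		if !ok {
			log.Fatal("unexpected token format")
		}
		// Never print the password itself; length only, for the sketch.
		fmt.Println(*data.ProxyEndpoint, user, len(pass))
	}
}
```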
-
-type awsAwsjson11_deserializeOpGetDownloadUrlForLayer struct {
-}
-
-func (*awsAwsjson11_deserializeOpGetDownloadUrlForLayer) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpGetDownloadUrlForLayer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorGetDownloadUrlForLayer(response, &metadata)
-	}
-	output := &GetDownloadUrlForLayerOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentGetDownloadUrlForLayerOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorGetDownloadUrlForLayer(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("LayerInaccessibleException", errorCode):
-		return awsAwsjson11_deserializeErrorLayerInaccessibleException(response, errorBody)
-
-	case strings.EqualFold("LayersNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorLayersNotFoundException(response, errorBody)
-
-	case strings.EqualFold("RepositoryNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	case strings.EqualFold("UnableToGetUpstreamLayerException", errorCode):
-		return awsAwsjson11_deserializeErrorUnableToGetUpstreamLayerException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
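A second pattern repeated by every handler above is the snapshot: the body is tee'd into a small fixed-size ring buffer while decoding, so a decode failure can attach the most recently read bytes to the `DeserializationError` without buffering a possibly large response. A minimal sketch of the idea, using a plain `bytes.Buffer` as a stand-in for `smithyio.RingBuffer`:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Deliberately malformed JSON standing in for a bad response body.
	resp := strings.NewReader(`{"repository": {"repositoryName": }`)

	var ring bytes.Buffer // stand-in for smithyio.NewRingBuffer(buff[:])
	dec := json.NewDecoder(io.TeeReader(resp, &ring))
	dec.UseNumber() // keep numbers as json.Number, as the generated code does

	var shape interface{}
	if err := dec.Decode(&shape); err != nil && err != io.EOF {
		// On failure, the ring buffer holds what was read, which is what
		// the generated code copies into DeserializationError.Snapshot.
		fmt.Printf("decode failed: %v\nsnapshot: %q\n", err, ring.String())
	}
}
```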
-
-type awsAwsjson11_deserializeOpGetLifecyclePolicy struct {
-}
-
-func (*awsAwsjson11_deserializeOpGetLifecyclePolicy) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpGetLifecyclePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorGetLifecyclePolicy(response, &metadata)
-	}
-	output := &GetLifecyclePolicyOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentGetLifecyclePolicyOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorGetLifecyclePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("LifecyclePolicyNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorLifecyclePolicyNotFoundException(response, errorBody)
-
-	case strings.EqualFold("RepositoryNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	case strings.EqualFold("ValidationException", errorCode):
-		return awsAwsjson11_deserializeErrorValidationException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
-
-type awsAwsjson11_deserializeOpGetLifecyclePolicyPreview struct {
-}
-
-func (*awsAwsjson11_deserializeOpGetLifecyclePolicyPreview) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpGetLifecyclePolicyPreview) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorGetLifecyclePolicyPreview(response, &metadata)
-	}
-	output := &GetLifecyclePolicyPreviewOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentGetLifecyclePolicyPreviewOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorGetLifecyclePolicyPreview(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("LifecyclePolicyPreviewNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorLifecyclePolicyPreviewNotFoundException(response, errorBody)
-
strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpGetRegistryPolicy struct { -} - -func (*awsAwsjson11_deserializeOpGetRegistryPolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpGetRegistryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response, &metadata) - } - output := &GetRegistryPolicyOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentGetRegistryPolicyOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case 
strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RegistryPolicyNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRegistryPolicyNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpGetRegistryScanningConfiguration struct { -} - -func (*awsAwsjson11_deserializeOpGetRegistryScanningConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpGetRegistryScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response, &metadata) - } - output := &GetRegistryScanningConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentGetRegistryScanningConfigurationOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), 
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	case strings.EqualFold("ValidationException", errorCode):
-		return awsAwsjson11_deserializeErrorValidationException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
-
-type awsAwsjson11_deserializeOpGetRepositoryPolicy struct {
-}
-
-func (*awsAwsjson11_deserializeOpGetRepositoryPolicy) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpGetRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response, &metadata)
-	}
-	output := &GetRepositoryPolicyOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("RepositoryNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-	case strings.EqualFold("RepositoryPolicyNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
-
-type awsAwsjson11_deserializeOpInitiateLayerUpload struct {
-}
-
-func (*awsAwsjson11_deserializeOpInitiateLayerUpload) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpInitiateLayerUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response, &metadata)
-	}
-	output := &InitiateLayerUploadOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("KmsException", errorCode):
-		return awsAwsjson11_deserializeErrorKmsException(response, errorBody)
-
-	case strings.EqualFold("RepositoryNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
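None of these `OperationDeserializer` values are invoked directly; the generated `api_op_*.go` files in the same package register them on the Deserialize step of the smithy middleware stack. A user of the client can hook that same step, for example to log raw status codes. A hedged sketch, assuming smithy-go's `DeserializeMiddlewareFunc` helper; `logRawStatus` and the repository name are illustrative, not part of the SDK:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// logRawStatus adds a middleware on the Deserialize step, before the
// generated OperationDeserializer, that logs the raw HTTP status code on
// the way back out of the stack.
func logRawStatus(stack *middleware.Stack) error {
	return stack.Deserialize.Add(middleware.DeserializeMiddlewareFunc(
		"LogRawStatus",
		func(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
			middleware.DeserializeOutput, middleware.Metadata, error,
		) {
			out, metadata, err := next.HandleDeserialize(ctx, in)
			if resp, ok := out.RawResponse.(*smithyhttp.Response); ok {
				fmt.Println("status:", resp.StatusCode)
			}
			return out, metadata, err
		},
	), middleware.Before)
}

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg, func(o *ecr.Options) {
		o.APIOptions = append(o.APIOptions, logRawStatus)
	})
	if _, err := client.ListImages(context.TODO(), &ecr.ListImagesInput{
		RepositoryName: aws.String("example"), // hypothetical repository
	}); err != nil {
		log.Fatal(err)
	}
}
```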
-
-type awsAwsjson11_deserializeOpListImages struct {
-}
-
-func (*awsAwsjson11_deserializeOpListImages) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpListImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorListImages(response, &metadata)
-	}
-	output := &ListImagesOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentListImagesOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorListImages(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("RepositoryNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
-
-type awsAwsjson11_deserializeOpListTagsForResource struct {
-}
-
-func (*awsAwsjson11_deserializeOpListTagsForResource) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorListTagsForResource(response, &metadata)
-	}
-	output := &ListTagsForResourceOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("RepositoryNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
-
-type awsAwsjson11_deserializeOpPutImage struct {
-}
-
-func (*awsAwsjson11_deserializeOpPutImage) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpPutImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorPutImage(response, &metadata)
-	}
-	output := &PutImageOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentPutImageOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("ImageAlreadyExistsException", errorCode):
-		return awsAwsjson11_deserializeErrorImageAlreadyExistsException(response, errorBody)
-
-	case strings.EqualFold("ImageDigestDoesNotMatchException", errorCode):
-		return awsAwsjson11_deserializeErrorImageDigestDoesNotMatchException(response, errorBody)
-
-	case strings.EqualFold("ImageTagAlreadyExistsException", errorCode):
-		return awsAwsjson11_deserializeErrorImageTagAlreadyExistsException(response, errorBody)
-
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("KmsException", errorCode):
-		return awsAwsjson11_deserializeErrorKmsException(response, errorBody)
-
-	case strings.EqualFold("LayersNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorLayersNotFoundException(response, errorBody)
-
-	case strings.EqualFold("LimitExceededException", errorCode):
-		return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody)
-
-	case strings.EqualFold("ReferencedImagesNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorReferencedImagesNotFoundException(response, errorBody)
-
-	case strings.EqualFold("RepositoryNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
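PutImage has the widest error fan-out in this hunk, and the point of the dispatch is that callers get a concrete modeled type rather than a generic error, so they can branch with `errors.As` instead of string matching. A hedged caller-side sketch (the repository name, tag, and placeholder manifest are illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg)
	_, err = client.PutImage(context.TODO(), &ecr.PutImageInput{
		RepositoryName: aws.String("example"), // hypothetical repository
		ImageManifest:  aws.String("{}"),      // placeholder manifest
		ImageTag:       aws.String("latest"),
	})
	// The deserializer returned a concrete modeled type, so this can be
	// matched structurally; re-pushing an identical image is benign.
	var exists *types.ImageAlreadyExistsException
	if errors.As(err, &exists) {
		fmt.Println("image already pushed, nothing to do:", exists.ErrorMessage())
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}
```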
-
-type awsAwsjson11_deserializeOpPutImageScanningConfiguration struct {
-}
-
-func (*awsAwsjson11_deserializeOpPutImageScanningConfiguration) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpPutImageScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorPutImageScanningConfiguration(response, &metadata)
-	}
-	output := &PutImageScanningConfigurationOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentPutImageScanningConfigurationOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorPutImageScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("RepositoryNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	case strings.EqualFold("ValidationException", errorCode):
-		return awsAwsjson11_deserializeErrorValidationException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
-
-type awsAwsjson11_deserializeOpPutImageTagMutability struct {
-}
-
-func (*awsAwsjson11_deserializeOpPutImageTagMutability) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpPutImageTagMutability) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorPutImageTagMutability(response, &metadata)
-	}
-	output := &PutImageTagMutabilityOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentPutImageTagMutabilityOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorPutImageTagMutability(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("RepositoryNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
-
-type awsAwsjson11_deserializeOpPutLifecyclePolicy struct {
-}
-
-func (*awsAwsjson11_deserializeOpPutLifecyclePolicy) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpPutLifecyclePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response, &metadata)
-	}
-	output := &PutLifecyclePolicyOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentPutLifecyclePolicyOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("RepositoryNotFoundException", errorCode):
-		return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	case strings.EqualFold("ValidationException", errorCode):
-		return awsAwsjson11_deserializeErrorValidationException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
-
-type awsAwsjson11_deserializeOpPutRegistryPolicy struct {
-}
-
-func (*awsAwsjson11_deserializeOpPutRegistryPolicy) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpPutRegistryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response, &metadata)
-	}
-	output := &PutRegistryPolicyOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentPutRegistryPolicyOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	case strings.EqualFold("ValidationException", errorCode):
-		return awsAwsjson11_deserializeErrorValidationException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
-
-type awsAwsjson11_deserializeOpPutRegistryScanningConfiguration struct {
-}
-
-func (*awsAwsjson11_deserializeOpPutRegistryScanningConfiguration) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpPutRegistryScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-	}
-
-	if response.StatusCode < 200 || response.StatusCode >= 300 {
-		return out, metadata, awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response, &metadata)
-	}
-	output := &PutRegistryScanningConfigurationOutput{}
-	out.Result = output
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(response.Body, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	var shape interface{}
-	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	err = awsAwsjson11_deserializeOpDocumentPutRegistryScanningConfigurationOutput(&output, shape)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return out, metadata, err
-	}
-
-	return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-	var errorBuffer bytes.Buffer
-	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-	}
-	errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-	errorCode := "UnknownError"
-	errorMessage := errorCode
-
-	headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-	var buff [1024]byte
-	ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-	body := io.TeeReader(errorBody, ringBuffer)
-	decoder := json.NewDecoder(body)
-	decoder.UseNumber()
-	bodyInfo, err := getProtocolErrorInfo(decoder)
-	if err != nil {
-		var snapshot bytes.Buffer
-		io.Copy(&snapshot, ringBuffer)
-		err = &smithy.DeserializationError{
-			Err:      fmt.Errorf("failed to decode response body, %w", err),
-			Snapshot: snapshot.Bytes(),
-		}
-		return err
-	}
-
-	errorBody.Seek(0, io.SeekStart)
-	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-		errorCode = restjson.SanitizeErrorCode(typ)
-	}
-	if len(bodyInfo.Message) != 0 {
-		errorMessage = bodyInfo.Message
-	}
-	switch {
-	case strings.EqualFold("InvalidParameterException", errorCode):
-		return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-	case strings.EqualFold("ServerException", errorCode):
-		return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-	case strings.EqualFold("ValidationException", errorCode):
-		return awsAwsjson11_deserializeErrorValidationException(response, errorBody)
-
-	default:
-		genericError := &smithy.GenericAPIError{
-			Code:    errorCode,
-			Message: errorMessage,
-		}
-		return genericError
-
-	}
-}
-
-type awsAwsjson11_deserializeOpPutReplicationConfiguration struct {
-}
-
-func (*awsAwsjson11_deserializeOpPutReplicationConfiguration) ID() string {
-	return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpPutReplicationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, in)
-	if err != nil {
-		return out, metadata, err
-	}
-
-	response, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorPutReplicationConfiguration(response, &metadata) - } - output := &PutReplicationConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentPutReplicationConfigurationOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorPutReplicationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpSetRepositoryPolicy struct { -} - -func (*awsAwsjson11_deserializeOpSetRepositoryPolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpSetRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, 
metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response, &metadata) - } - output := &SetRepositoryPolicyOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentSetRepositoryPolicyOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpStartImageScan struct { -} - -func (*awsAwsjson11_deserializeOpStartImageScan) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpStartImageScan) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - 
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorStartImageScan(response, &metadata) - } - output := &StartImageScanOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentStartImageScanOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorStartImageScan(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("ImageNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) - - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("LimitExceededException", errorCode): - return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnsupportedImageTypeException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedImageTypeException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } 
-} - -type awsAwsjson11_deserializeOpStartLifecyclePolicyPreview struct { -} - -func (*awsAwsjson11_deserializeOpStartLifecyclePolicyPreview) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpStartLifecyclePolicyPreview) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response, &metadata) - } - output := &StartLifecyclePolicyPreviewOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentStartLifecyclePolicyPreviewOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("LifecyclePolicyNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorLifecyclePolicyNotFoundException(response, errorBody) - - case strings.EqualFold("LifecyclePolicyPreviewInProgressException", errorCode): - return awsAwsjson11_deserializeErrorLifecyclePolicyPreviewInProgressException(response, 
errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpTagResource struct { -} - -func (*awsAwsjson11_deserializeOpTagResource) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata) - } - output := &TagResourceOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentTagResourceOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case 
strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("InvalidTagParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("TooManyTagsException", errorCode): - return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpUntagResource struct { -} - -func (*awsAwsjson11_deserializeOpUntagResource) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata) - } - output := &UntagResourceOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentUntagResourceOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - 
Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("InvalidTagParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("TooManyTagsException", errorCode): - return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpUpdatePullThroughCacheRule struct { -} - -func (*awsAwsjson11_deserializeOpUpdatePullThroughCacheRule) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpUpdatePullThroughCacheRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorUpdatePullThroughCacheRule(response, &metadata) - } - output := &UpdatePullThroughCacheRuleOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentUpdatePullThroughCacheRuleOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorUpdatePullThroughCacheRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - 
ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("PullThroughCacheRuleNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorPullThroughCacheRuleNotFoundException(response, errorBody) - - case strings.EqualFold("SecretNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorSecretNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnableToAccessSecretException", errorCode): - return awsAwsjson11_deserializeErrorUnableToAccessSecretException(response, errorBody) - - case strings.EqualFold("UnableToDecryptSecretValueException", errorCode): - return awsAwsjson11_deserializeErrorUnableToDecryptSecretValueException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpUploadLayerPart struct { -} - -func (*awsAwsjson11_deserializeOpUploadLayerPart) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpUploadLayerPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorUploadLayerPart(response, &metadata) - } - output := &UploadLayerPartOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentUploadLayerPartOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode 
response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidLayerPartException", errorCode): - return awsAwsjson11_deserializeErrorInvalidLayerPartException(response, errorBody) - - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("KmsException", errorCode): - return awsAwsjson11_deserializeErrorKmsException(response, errorBody) - - case strings.EqualFold("LimitExceededException", errorCode): - return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UploadNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorUploadNotFoundException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpValidatePullThroughCacheRule struct { -} - -func (*awsAwsjson11_deserializeOpValidatePullThroughCacheRule) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpValidatePullThroughCacheRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorValidatePullThroughCacheRule(response, &metadata) - } - output := &ValidatePullThroughCacheRuleOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := 
io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentValidatePullThroughCacheRuleOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorValidatePullThroughCacheRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("PullThroughCacheRuleNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorPullThroughCacheRuleNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("ValidationException", errorCode): - return awsAwsjson11_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsAwsjson11_deserializeErrorEmptyUploadException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.EmptyUploadException{} - err := awsAwsjson11_deserializeDocumentEmptyUploadException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = 
&smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorImageAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.ImageAlreadyExistsException{} - err := awsAwsjson11_deserializeDocumentImageAlreadyExistsException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorImageDigestDoesNotMatchException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.ImageDigestDoesNotMatchException{} - err := awsAwsjson11_deserializeDocumentImageDigestDoesNotMatchException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorImageNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.ImageNotFoundException{} - err := awsAwsjson11_deserializeDocumentImageNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorImageTagAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := 
json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.ImageTagAlreadyExistsException{} - err := awsAwsjson11_deserializeDocumentImageTagAlreadyExistsException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorInvalidLayerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.InvalidLayerException{} - err := awsAwsjson11_deserializeDocumentInvalidLayerException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorInvalidLayerPartException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.InvalidLayerPartException{} - err := awsAwsjson11_deserializeDocumentInvalidLayerPartException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorInvalidParameterException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.InvalidParameterException{} - err := awsAwsjson11_deserializeDocumentInvalidParameterException(&output, shape) - - if 
err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorInvalidTagParameterException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.InvalidTagParameterException{} - err := awsAwsjson11_deserializeDocumentInvalidTagParameterException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorKmsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.KmsException{} - err := awsAwsjson11_deserializeDocumentKmsException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorLayerAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.LayerAlreadyExistsException{} - err := awsAwsjson11_deserializeDocumentLayerAlreadyExistsException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorLayerInaccessibleException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := 
io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.LayerInaccessibleException{} - err := awsAwsjson11_deserializeDocumentLayerInaccessibleException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorLayerPartTooSmallException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.LayerPartTooSmallException{} - err := awsAwsjson11_deserializeDocumentLayerPartTooSmallException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorLayersNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.LayersNotFoundException{} - err := awsAwsjson11_deserializeDocumentLayersNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorLifecyclePolicyNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.LifecyclePolicyNotFoundException{} - err := 
awsAwsjson11_deserializeDocumentLifecyclePolicyNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorLifecyclePolicyPreviewInProgressException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.LifecyclePolicyPreviewInProgressException{} - err := awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewInProgressException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorLifecyclePolicyPreviewNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.LifecyclePolicyPreviewNotFoundException{} - err := awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.LimitExceededException{} - err := awsAwsjson11_deserializeDocumentLimitExceededException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func 
awsAwsjson11_deserializeErrorPullThroughCacheRuleAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.PullThroughCacheRuleAlreadyExistsException{} - err := awsAwsjson11_deserializeDocumentPullThroughCacheRuleAlreadyExistsException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorPullThroughCacheRuleNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.PullThroughCacheRuleNotFoundException{} - err := awsAwsjson11_deserializeDocumentPullThroughCacheRuleNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorReferencedImagesNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.ReferencedImagesNotFoundException{} - err := awsAwsjson11_deserializeDocumentReferencedImagesNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorRegistryPolicyNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && 
err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.RegistryPolicyNotFoundException{} - err := awsAwsjson11_deserializeDocumentRegistryPolicyNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorRepositoryAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.RepositoryAlreadyExistsException{} - err := awsAwsjson11_deserializeDocumentRepositoryAlreadyExistsException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorRepositoryNotEmptyException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.RepositoryNotEmptyException{} - err := awsAwsjson11_deserializeDocumentRepositoryNotEmptyException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorRepositoryNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.RepositoryNotFoundException{} - err := awsAwsjson11_deserializeDocumentRepositoryNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) 
- err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.RepositoryPolicyNotFoundException{} - err := awsAwsjson11_deserializeDocumentRepositoryPolicyNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorScanNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.ScanNotFoundException{} - err := awsAwsjson11_deserializeDocumentScanNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorSecretNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.SecretNotFoundException{} - err := awsAwsjson11_deserializeDocumentSecretNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - 
decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.ServerException{} - err := awsAwsjson11_deserializeDocumentServerException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorTooManyTagsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.TooManyTagsException{} - err := awsAwsjson11_deserializeDocumentTooManyTagsException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorUnableToAccessSecretException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.UnableToAccessSecretException{} - err := awsAwsjson11_deserializeDocumentUnableToAccessSecretException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorUnableToDecryptSecretValueException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.UnableToDecryptSecretValueException{} - err := awsAwsjson11_deserializeDocumentUnableToDecryptSecretValueException(&output, shape) - - if err != nil { - 
var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorUnableToGetUpstreamImageException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.UnableToGetUpstreamImageException{} - err := awsAwsjson11_deserializeDocumentUnableToGetUpstreamImageException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorUnableToGetUpstreamLayerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.UnableToGetUpstreamLayerException{} - err := awsAwsjson11_deserializeDocumentUnableToGetUpstreamLayerException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorUnsupportedImageTypeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.UnsupportedImageTypeException{} - err := awsAwsjson11_deserializeDocumentUnsupportedImageTypeException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorUnsupportedUpstreamRegistryException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte 
- ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.UnsupportedUpstreamRegistryException{} - err := awsAwsjson11_deserializeDocumentUnsupportedUpstreamRegistryException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorUploadNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.UploadNotFoundException{} - err := awsAwsjson11_deserializeDocumentUploadNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorValidationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.ValidationException{} - err := awsAwsjson11_deserializeDocumentValidationException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeDocumentAttribute(v **types.Attribute, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Attribute - if *v == nil { - sv = &types.Attribute{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "key": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AttributeKey to be of type string, got %T instead", value) - } 
- sv.Key = ptr.String(jtv) - } - - case "value": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AttributeValue to be of type string, got %T instead", value) - } - sv.Value = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentAttributeList(v *[]types.Attribute, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.Attribute - if *v == nil { - cv = []types.Attribute{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.Attribute - destAddr := &col - if err := awsAwsjson11_deserializeDocumentAttribute(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentAuthorizationData(v **types.AuthorizationData, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AuthorizationData - if *v == nil { - sv = &types.AuthorizationData{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "authorizationToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Base64 to be of type string, got %T instead", value) - } - sv.AuthorizationToken = ptr.String(jtv) - } - - case "expiresAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.ExpiresAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected ExpirationTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "proxyEndpoint": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ProxyEndpoint to be of type string, got %T instead", value) - } - sv.ProxyEndpoint = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentAuthorizationDataList(v *[]types.AuthorizationData, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.AuthorizationData - if *v == nil { - cv = []types.AuthorizationData{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.AuthorizationData - destAddr := &col - if err := awsAwsjson11_deserializeDocumentAuthorizationData(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentAwsEcrContainerImageDetails(v **types.AwsEcrContainerImageDetails, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AwsEcrContainerImageDetails - if *v == nil { - sv = &types.AwsEcrContainerImageDetails{} - } else { - sv = *v - } - - for key, 
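// ---- Editorial note: illustrative sketch, not part of the vendored patch ----
// The deserializeDocument* helpers above all walk a decoded
// map[string]interface{} and copy recognized keys into the target struct. A
// compact sketch for a hypothetical two-field struct, mirroring the
// authorizationToken / expiresAt handling in AuthorizationData; tokenData and
// deserializeTokenData are invented names, ptr and smithytime are the real
// smithy-go packages. The generated code spells the timestamp branch as a
// type switch; the condensed assertion below is behaviorally equivalent.
package example

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/aws/smithy-go/ptr"
	smithytime "github.com/aws/smithy-go/time"
)

type tokenData struct { // stand-in for types.AuthorizationData
	Token     *string
	ExpiresAt *time.Time
}

func deserializeTokenData(v **tokenData, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil // absent document: leave the target untouched
	}
	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}
	sv := *v
	if sv == nil {
		sv = &tokenData{} // callee allocates through the double pointer
	}
	for key, val := range shape {
		switch key {
		case "token":
			if val != nil {
				jtv, ok := val.(string)
				if !ok {
					return fmt.Errorf("expected string, got %T", val)
				}
				sv.Token = ptr.String(jtv)
			}
		case "expiresAt":
			// Timestamps arrive as epoch seconds; UseNumber() upstream
			// guarantees a json.Number here rather than a float64.
			if val != nil {
				jtv, ok := val.(json.Number)
				if !ok {
					return fmt.Errorf("expected json.Number, got %T", val)
				}
				f64, err := jtv.Float64()
				if err != nil {
					return err
				}
				sv.ExpiresAt = ptr.Time(smithytime.ParseEpochSeconds(f64))
			}
		}
	}
	*v = sv
	return nil
}
// ---- End editorial note ------------------------------------------------------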
value := range shape { - switch key { - case "architecture": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Arch to be of type string, got %T instead", value) - } - sv.Architecture = ptr.String(jtv) - } - - case "author": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Author to be of type string, got %T instead", value) - } - sv.Author = ptr.String(jtv) - } - - case "imageHash": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) - } - sv.ImageHash = ptr.String(jtv) - } - - case "imageTags": - if err := awsAwsjson11_deserializeDocumentImageTagsList(&sv.ImageTags, value); err != nil { - return err - } - - case "platform": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Platform to be of type string, got %T instead", value) - } - sv.Platform = ptr.String(jtv) - } - - case "pushedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.PushedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) - - } - } - - case "registry": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.Registry = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentCvssScore(v **types.CvssScore, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.CvssScore - if *v == nil { - sv = &types.CvssScore{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "baseScore": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.BaseScore = f64 - - case string: - var f64 float64 - switch { - case strings.EqualFold(jtv, "NaN"): - f64 = math.NaN() - - case strings.EqualFold(jtv, "Infinity"): - f64 = math.Inf(1) - - case strings.EqualFold(jtv, "-Infinity"): - f64 = math.Inf(-1) - - default: - return fmt.Errorf("unknown JSON number value: %s", jtv) - - } - sv.BaseScore = f64 - - default: - return fmt.Errorf("expected BaseScore to be a JSON Number, got %T instead", value) - - } - } - - case "scoringVector": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScoringVector to be of type string, got %T instead", value) - } - sv.ScoringVector = ptr.String(jtv) - } - - case "source": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Source to be of type string, got %T instead", value) - } - sv.Source = ptr.String(jtv) - } - - case "version": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Version to be of type string, got %T instead", value) - } - sv.Version = ptr.String(jtv) - } - - default: - _, _ = key, value - - 
} - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentCvssScoreAdjustment(v **types.CvssScoreAdjustment, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.CvssScoreAdjustment - if *v == nil { - sv = &types.CvssScoreAdjustment{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "metric": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Metric to be of type string, got %T instead", value) - } - sv.Metric = ptr.String(jtv) - } - - case "reason": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Reason to be of type string, got %T instead", value) - } - sv.Reason = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentCvssScoreAdjustmentList(v *[]types.CvssScoreAdjustment, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.CvssScoreAdjustment - if *v == nil { - cv = []types.CvssScoreAdjustment{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.CvssScoreAdjustment - destAddr := &col - if err := awsAwsjson11_deserializeDocumentCvssScoreAdjustment(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentCvssScoreDetails(v **types.CvssScoreDetails, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.CvssScoreDetails - if *v == nil { - sv = &types.CvssScoreDetails{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "adjustments": - if err := awsAwsjson11_deserializeDocumentCvssScoreAdjustmentList(&sv.Adjustments, value); err != nil { - return err - } - - case "score": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.Score = f64 - - case string: - var f64 float64 - switch { - case strings.EqualFold(jtv, "NaN"): - f64 = math.NaN() - - case strings.EqualFold(jtv, "Infinity"): - f64 = math.Inf(1) - - case strings.EqualFold(jtv, "-Infinity"): - f64 = math.Inf(-1) - - default: - return fmt.Errorf("unknown JSON number value: %s", jtv) - - } - sv.Score = f64 - - default: - return fmt.Errorf("expected Score to be a JSON Number, got %T instead", value) - - } - } - - case "scoreSource": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Source to be of type string, got %T instead", value) - } - sv.ScoreSource = ptr.String(jtv) - } - - case "scoringVector": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScoringVector to be of type string, got %T instead", value) - } - sv.ScoringVector = ptr.String(jtv) - } - - case "version": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Version to be of type string, 
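// ---- Editorial note: illustrative sketch, not part of the vendored patch ----
// The CVSS score fields above (baseScore, score) accept either a JSON number
// or the sentinel strings "NaN", "Infinity", "-Infinity". The generated code
// inlines this branch at every float field; parseSmithyFloat64 is a
// hypothetical name for the same logic factored out:
package example

import (
	"encoding/json"
	"fmt"
	"math"
	"strings"
)

func parseSmithyFloat64(value interface{}) (float64, error) {
	switch jtv := value.(type) {
	case json.Number:
		return jtv.Float64()
	case string:
		// Non-finite values cannot be encoded as JSON numbers, so the wire
		// format carries them as case-insensitive sentinel strings.
		switch {
		case strings.EqualFold(jtv, "NaN"):
			return math.NaN(), nil
		case strings.EqualFold(jtv, "Infinity"):
			return math.Inf(1), nil
		case strings.EqualFold(jtv, "-Infinity"):
			return math.Inf(-1), nil
		default:
			return 0, fmt.Errorf("unknown JSON number value: %s", jtv)
		}
	default:
		return 0, fmt.Errorf("expected a JSON Number, got %T", value)
	}
}
// ---- End editorial note ------------------------------------------------------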
got %T instead", value) - } - sv.Version = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentCvssScoreList(v *[]types.CvssScore, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.CvssScore - if *v == nil { - cv = []types.CvssScore{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.CvssScore - destAddr := &col - if err := awsAwsjson11_deserializeDocumentCvssScore(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentEmptyUploadException(v **types.EmptyUploadException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.EmptyUploadException - if *v == nil { - sv = &types.EmptyUploadException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentEncryptionConfiguration(v **types.EncryptionConfiguration, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.EncryptionConfiguration - if *v == nil { - sv = &types.EncryptionConfiguration{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "encryptionType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected EncryptionType to be of type string, got %T instead", value) - } - sv.EncryptionType = types.EncryptionType(jtv) - } - - case "kmsKey": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected KmsKey to be of type string, got %T instead", value) - } - sv.KmsKey = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentEnhancedImageScanFinding(v **types.EnhancedImageScanFinding, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.EnhancedImageScanFinding - if *v == nil { - sv = &types.EnhancedImageScanFinding{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "awsAccountId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.AwsAccountId = ptr.String(jtv) - } - - case "description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected FindingDescription to be of type string, got %T instead", value) - } - 
sv.Description = ptr.String(jtv) - } - - case "findingArn": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected FindingArn to be of type string, got %T instead", value) - } - sv.FindingArn = ptr.String(jtv) - } - - case "firstObservedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.FirstObservedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) - - } - } - - case "lastObservedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.LastObservedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) - - } - } - - case "packageVulnerabilityDetails": - if err := awsAwsjson11_deserializeDocumentPackageVulnerabilityDetails(&sv.PackageVulnerabilityDetails, value); err != nil { - return err - } - - case "remediation": - if err := awsAwsjson11_deserializeDocumentRemediation(&sv.Remediation, value); err != nil { - return err - } - - case "resources": - if err := awsAwsjson11_deserializeDocumentResourceList(&sv.Resources, value); err != nil { - return err - } - - case "score": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.Score = f64 - - case string: - var f64 float64 - switch { - case strings.EqualFold(jtv, "NaN"): - f64 = math.NaN() - - case strings.EqualFold(jtv, "Infinity"): - f64 = math.Inf(1) - - case strings.EqualFold(jtv, "-Infinity"): - f64 = math.Inf(-1) - - default: - return fmt.Errorf("unknown JSON number value: %s", jtv) - - } - sv.Score = f64 - - default: - return fmt.Errorf("expected Score to be a JSON Number, got %T instead", value) - - } - } - - case "scoreDetails": - if err := awsAwsjson11_deserializeDocumentScoreDetails(&sv.ScoreDetails, value); err != nil { - return err - } - - case "severity": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Severity to be of type string, got %T instead", value) - } - sv.Severity = ptr.String(jtv) - } - - case "status": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Status to be of type string, got %T instead", value) - } - sv.Status = ptr.String(jtv) - } - - case "title": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Title to be of type string, got %T instead", value) - } - sv.Title = ptr.String(jtv) - } - - case "type": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Type to be of type string, got %T instead", value) - } - sv.Type = ptr.String(jtv) - } - - case "updatedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) - - } - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentEnhancedImageScanFindingList(v *[]types.EnhancedImageScanFinding, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := 
value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.EnhancedImageScanFinding - if *v == nil { - cv = []types.EnhancedImageScanFinding{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.EnhancedImageScanFinding - destAddr := &col - if err := awsAwsjson11_deserializeDocumentEnhancedImageScanFinding(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentFindingSeverityCounts(v *map[string]int32, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var mv map[string]int32 - if *v == nil { - mv = map[string]int32{} - } else { - mv = *v - } - - for key, value := range shape { - var parsedVal int32 - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected SeverityCount to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - parsedVal = int32(i64) - } - mv[key] = parsedVal - - } - *v = mv - return nil -} - -func awsAwsjson11_deserializeDocumentImage(v **types.Image, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Image - if *v == nil { - sv = &types.Image{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageId": - if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil { - return err - } - - case "imageManifest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageManifest to be of type string, got %T instead", value) - } - sv.ImageManifest = ptr.String(jtv) - } - - case "imageManifestMediaType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) - } - sv.ImageManifestMediaType = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageAlreadyExistsException(v **types.ImageAlreadyExistsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageAlreadyExistsException - if *v == nil { - sv = &types.ImageAlreadyExistsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - 
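// ---- Editorial note: illustrative sketch, not part of the vendored patch ----
// FindingSeverityCounts above is a JSON object of integer counts. The
// generated pattern for such maps: assert json.Number, widen through Int64,
// then narrow to the modeled int32. deserializeSeverityCounts is a
// hypothetical stand-in for awsAwsjson11_deserializeDocumentFindingSeverityCounts.
package example

import (
	"encoding/json"
	"fmt"
)

func deserializeSeverityCounts(v *map[string]int32, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}
	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}
	mv := *v
	if mv == nil {
		mv = map[string]int32{}
	}
	for key, val := range shape {
		var parsed int32
		if val != nil {
			jtv, ok := val.(json.Number)
			if !ok {
				return fmt.Errorf("expected json.Number, got %T", val)
			}
			i64, err := jtv.Int64()
			if err != nil {
				return err
			}
			parsed = int32(i64) // field is modeled as int32; plain narrowing
		}
		mv[key] = parsed
	}
	*v = mv
	return nil
}
// ---- End editorial note ------------------------------------------------------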
sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageDetail(v **types.ImageDetail, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageDetail - if *v == nil { - sv = &types.ImageDetail{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "artifactMediaType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) - } - sv.ArtifactMediaType = ptr.String(jtv) - } - - case "imageDigest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) - } - sv.ImageDigest = ptr.String(jtv) - } - - case "imageManifestMediaType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) - } - sv.ImageManifestMediaType = ptr.String(jtv) - } - - case "imagePushedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.ImagePushedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected PushTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "imageScanFindingsSummary": - if err := awsAwsjson11_deserializeDocumentImageScanFindingsSummary(&sv.ImageScanFindingsSummary, value); err != nil { - return err - } - - case "imageScanStatus": - if err := awsAwsjson11_deserializeDocumentImageScanStatus(&sv.ImageScanStatus, value); err != nil { - return err - } - - case "imageSizeInBytes": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ImageSizeInBytes to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ImageSizeInBytes = ptr.Int64(i64) - } - - case "imageTags": - if err := awsAwsjson11_deserializeDocumentImageTagList(&sv.ImageTags, value); err != nil { - return err - } - - case "lastRecordedPullTime": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.LastRecordedPullTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected RecordedPullTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageDetailList(v *[]types.ImageDetail, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.ImageDetail - if 
*v == nil { - cv = []types.ImageDetail{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.ImageDetail - destAddr := &col - if err := awsAwsjson11_deserializeDocumentImageDetail(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentImageDigestDoesNotMatchException(v **types.ImageDigestDoesNotMatchException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageDigestDoesNotMatchException - if *v == nil { - sv = &types.ImageDigestDoesNotMatchException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageFailure(v **types.ImageFailure, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageFailure - if *v == nil { - sv = &types.ImageFailure{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "failureCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageFailureCode to be of type string, got %T instead", value) - } - sv.FailureCode = types.ImageFailureCode(jtv) - } - - case "failureReason": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageFailureReason to be of type string, got %T instead", value) - } - sv.FailureReason = ptr.String(jtv) - } - - case "imageId": - if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageFailureList(v *[]types.ImageFailure, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.ImageFailure - if *v == nil { - cv = []types.ImageFailure{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.ImageFailure - destAddr := &col - if err := awsAwsjson11_deserializeDocumentImageFailure(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentImageIdentifier(v **types.ImageIdentifier, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageIdentifier - if *v == nil { - sv = &types.ImageIdentifier{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageDigest": - if value != nil { - jtv, ok := 
value.(string) - if !ok { - return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) - } - sv.ImageDigest = ptr.String(jtv) - } - - case "imageTag": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageTag to be of type string, got %T instead", value) - } - sv.ImageTag = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageIdentifierList(v *[]types.ImageIdentifier, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.ImageIdentifier - if *v == nil { - cv = []types.ImageIdentifier{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.ImageIdentifier - destAddr := &col - if err := awsAwsjson11_deserializeDocumentImageIdentifier(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentImageList(v *[]types.Image, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.Image - if *v == nil { - cv = []types.Image{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.Image - destAddr := &col - if err := awsAwsjson11_deserializeDocumentImage(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentImageNotFoundException(v **types.ImageNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageNotFoundException - if *v == nil { - sv = &types.ImageNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageReplicationStatus(v **types.ImageReplicationStatus, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageReplicationStatus - if *v == nil { - sv = &types.ImageReplicationStatus{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "failureCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ReplicationError to be of type string, got %T instead", value) - } - sv.FailureCode = ptr.String(jtv) - } - - case "region": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Region to be of type string, got %T instead", value) - } - sv.Region = ptr.String(jtv) - } - - case "registryId": - 
if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "status": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ReplicationStatus to be of type string, got %T instead", value) - } - sv.Status = types.ReplicationStatus(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageReplicationStatusList(v *[]types.ImageReplicationStatus, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.ImageReplicationStatus - if *v == nil { - cv = []types.ImageReplicationStatus{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.ImageReplicationStatus - destAddr := &col - if err := awsAwsjson11_deserializeDocumentImageReplicationStatus(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentImageScanFinding(v **types.ImageScanFinding, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageScanFinding - if *v == nil { - sv = &types.ImageScanFinding{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "attributes": - if err := awsAwsjson11_deserializeDocumentAttributeList(&sv.Attributes, value); err != nil { - return err - } - - case "description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected FindingDescription to be of type string, got %T instead", value) - } - sv.Description = ptr.String(jtv) - } - - case "name": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected FindingName to be of type string, got %T instead", value) - } - sv.Name = ptr.String(jtv) - } - - case "severity": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected FindingSeverity to be of type string, got %T instead", value) - } - sv.Severity = types.FindingSeverity(jtv) - } - - case "uri": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) - } - sv.Uri = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageScanFindingList(v *[]types.ImageScanFinding, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.ImageScanFinding - if *v == nil { - cv = []types.ImageScanFinding{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.ImageScanFinding - destAddr := &col - if err := awsAwsjson11_deserializeDocumentImageScanFinding(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentImageScanFindings(v 
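// ---- Editorial note: illustrative sketch, not part of the vendored patch ----
// Enum-valued fields above (types.ReplicationStatus, types.ScanStatus,
// types.FindingSeverity, ...) are plain defined string types; the generated
// code converts with a type conversion and never validates membership, so
// enum values added by the service later round-trip untouched. Sketch with a
// hypothetical replicationStatus type:
package example

import "fmt"

type replicationStatus string // stand-in for types.ReplicationStatus

const replicationStatusComplete replicationStatus = "COMPLETE"

func parseReplicationStatus(value interface{}) (replicationStatus, error) {
	jtv, ok := value.(string)
	if !ok {
		return "", fmt.Errorf("expected ReplicationStatus to be of type string, got %T instead", value)
	}
	// A bare conversion, not a switch over known constants: forward compatible.
	return replicationStatus(jtv), nil
}
// ---- End editorial note ------------------------------------------------------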
**types.ImageScanFindings, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageScanFindings - if *v == nil { - sv = &types.ImageScanFindings{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "enhancedFindings": - if err := awsAwsjson11_deserializeDocumentEnhancedImageScanFindingList(&sv.EnhancedFindings, value); err != nil { - return err - } - - case "findings": - if err := awsAwsjson11_deserializeDocumentImageScanFindingList(&sv.Findings, value); err != nil { - return err - } - - case "findingSeverityCounts": - if err := awsAwsjson11_deserializeDocumentFindingSeverityCounts(&sv.FindingSeverityCounts, value); err != nil { - return err - } - - case "imageScanCompletedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.ImageScanCompletedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected ScanTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "vulnerabilitySourceUpdatedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.VulnerabilitySourceUpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected VulnerabilitySourceUpdateTimestamp to be a JSON Number, got %T instead", value) - - } - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageScanFindingsSummary(v **types.ImageScanFindingsSummary, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageScanFindingsSummary - if *v == nil { - sv = &types.ImageScanFindingsSummary{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "findingSeverityCounts": - if err := awsAwsjson11_deserializeDocumentFindingSeverityCounts(&sv.FindingSeverityCounts, value); err != nil { - return err - } - - case "imageScanCompletedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.ImageScanCompletedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected ScanTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "vulnerabilitySourceUpdatedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.VulnerabilitySourceUpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected VulnerabilitySourceUpdateTimestamp to be a JSON Number, got %T instead", value) - - } - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageScanningConfiguration(v **types.ImageScanningConfiguration, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON 
type %v", value) - } - - var sv *types.ImageScanningConfiguration - if *v == nil { - sv = &types.ImageScanningConfiguration{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "scanOnPush": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected ScanOnPushFlag to be of type *bool, got %T instead", value) - } - sv.ScanOnPush = jtv - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageScanStatus(v **types.ImageScanStatus, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageScanStatus - if *v == nil { - sv = &types.ImageScanStatus{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScanStatusDescription to be of type string, got %T instead", value) - } - sv.Description = ptr.String(jtv) - } - - case "status": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScanStatus to be of type string, got %T instead", value) - } - sv.Status = types.ScanStatus(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageTagAlreadyExistsException(v **types.ImageTagAlreadyExistsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageTagAlreadyExistsException - if *v == nil { - sv = &types.ImageTagAlreadyExistsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageTagList(v *[]string, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []string - if *v == nil { - cv = []string{} - } else { - cv = *v - } - - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageTag to be of type string, got %T instead", value) - } - col = jtv - } - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentImageTagsList(v *[]string, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []string - if *v == nil { - cv = []string{} - } else { - cv = *v - } - - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageTag to be of type string, got %T instead", value) - } - col = 
jtv - } - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentInvalidLayerException(v **types.InvalidLayerException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidLayerException - if *v == nil { - sv = &types.InvalidLayerException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentInvalidLayerPartException(v **types.InvalidLayerPartException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidLayerPartException - if *v == nil { - sv = &types.InvalidLayerPartException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "lastValidByteReceived": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.LastValidByteReceived = ptr.Int64(i64) - } - - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - case "uploadId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) - } - sv.UploadId = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentInvalidParameterException(v **types.InvalidParameterException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidParameterException - if *v == nil { - sv = &types.InvalidParameterException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentInvalidTagParameterException(v **types.InvalidTagParameterException, value interface{}) error { 
- if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidTagParameterException - if *v == nil { - sv = &types.InvalidTagParameterException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentKmsException(v **types.KmsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.KmsException - if *v == nil { - sv = &types.KmsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "kmsError": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected KmsError to be of type string, got %T instead", value) - } - sv.KmsError = ptr.String(jtv) - } - - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLayer(v **types.Layer, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Layer - if *v == nil { - sv = &types.Layer{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "layerAvailability": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LayerAvailability to be of type string, got %T instead", value) - } - sv.LayerAvailability = types.LayerAvailability(jtv) - } - - case "layerDigest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LayerDigest to be of type string, got %T instead", value) - } - sv.LayerDigest = ptr.String(jtv) - } - - case "layerSize": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected LayerSizeInBytes to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.LayerSize = ptr.Int64(i64) - } - - case "mediaType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) - } - sv.MediaType = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLayerAlreadyExistsException(v **types.LayerAlreadyExistsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LayerAlreadyExistsException - if *v == nil { - sv = 
&types.LayerAlreadyExistsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLayerFailure(v **types.LayerFailure, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LayerFailure - if *v == nil { - sv = &types.LayerFailure{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "failureCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LayerFailureCode to be of type string, got %T instead", value) - } - sv.FailureCode = types.LayerFailureCode(jtv) - } - - case "failureReason": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LayerFailureReason to be of type string, got %T instead", value) - } - sv.FailureReason = ptr.String(jtv) - } - - case "layerDigest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected BatchedOperationLayerDigest to be of type string, got %T instead", value) - } - sv.LayerDigest = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLayerFailureList(v *[]types.LayerFailure, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.LayerFailure - if *v == nil { - cv = []types.LayerFailure{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.LayerFailure - destAddr := &col - if err := awsAwsjson11_deserializeDocumentLayerFailure(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentLayerInaccessibleException(v **types.LayerInaccessibleException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LayerInaccessibleException - if *v == nil { - sv = &types.LayerInaccessibleException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLayerList(v *[]types.Layer, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.Layer - if *v == nil { - cv = []types.Layer{} - } else { - cv = *v - } - - for _, value 
:= range shape { - var col types.Layer - destAddr := &col - if err := awsAwsjson11_deserializeDocumentLayer(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentLayerPartTooSmallException(v **types.LayerPartTooSmallException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LayerPartTooSmallException - if *v == nil { - sv = &types.LayerPartTooSmallException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLayersNotFoundException(v **types.LayersNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LayersNotFoundException - if *v == nil { - sv = &types.LayersNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLifecyclePolicyNotFoundException(v **types.LifecyclePolicyNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LifecyclePolicyNotFoundException - if *v == nil { - sv = &types.LifecyclePolicyNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewInProgressException(v **types.LifecyclePolicyPreviewInProgressException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LifecyclePolicyPreviewInProgressException - if *v == nil { - sv = &types.LifecyclePolicyPreviewInProgressException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv 
- return nil -} - -func awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewNotFoundException(v **types.LifecyclePolicyPreviewNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LifecyclePolicyPreviewNotFoundException - if *v == nil { - sv = &types.LifecyclePolicyPreviewNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewResult(v **types.LifecyclePolicyPreviewResult, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LifecyclePolicyPreviewResult - if *v == nil { - sv = &types.LifecyclePolicyPreviewResult{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "action": - if err := awsAwsjson11_deserializeDocumentLifecyclePolicyRuleAction(&sv.Action, value); err != nil { - return err - } - - case "appliedRulePriority": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected LifecyclePolicyRulePriority to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.AppliedRulePriority = ptr.Int32(int32(i64)) - } - - case "imageDigest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) - } - sv.ImageDigest = ptr.String(jtv) - } - - case "imagePushedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.ImagePushedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected PushTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "imageTags": - if err := awsAwsjson11_deserializeDocumentImageTagList(&sv.ImageTags, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewResultList(v *[]types.LifecyclePolicyPreviewResult, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.LifecyclePolicyPreviewResult - if *v == nil { - cv = []types.LifecyclePolicyPreviewResult{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.LifecyclePolicyPreviewResult - destAddr := &col - if err := awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewResult(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewSummary(v **types.LifecyclePolicyPreviewSummary, value interface{}) 
error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LifecyclePolicyPreviewSummary - if *v == nil { - sv = &types.LifecyclePolicyPreviewSummary{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "expiringImageTotalCount": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ImageCount to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ExpiringImageTotalCount = ptr.Int32(int32(i64)) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLifecyclePolicyRuleAction(v **types.LifecyclePolicyRuleAction, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LifecyclePolicyRuleAction - if *v == nil { - sv = &types.LifecyclePolicyRuleAction{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "type": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageActionType to be of type string, got %T instead", value) - } - sv.Type = types.ImageActionType(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLimitExceededException(v **types.LimitExceededException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LimitExceededException - if *v == nil { - sv = &types.LimitExceededException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentPackageVulnerabilityDetails(v **types.PackageVulnerabilityDetails, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.PackageVulnerabilityDetails - if *v == nil { - sv = &types.PackageVulnerabilityDetails{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "cvss": - if err := awsAwsjson11_deserializeDocumentCvssScoreList(&sv.Cvss, value); err != nil { - return err - } - - case "referenceUrls": - if err := awsAwsjson11_deserializeDocumentReferenceUrlsList(&sv.ReferenceUrls, value); err != nil { - return err - } - - case "relatedVulnerabilities": - if err := awsAwsjson11_deserializeDocumentRelatedVulnerabilitiesList(&sv.RelatedVulnerabilities, value); err != nil { - return err - } - - case "source": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Source to be of type string, got %T instead", value) 
- } - sv.Source = ptr.String(jtv) - } - - case "sourceUrl": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) - } - sv.SourceUrl = ptr.String(jtv) - } - - case "vendorCreatedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.VendorCreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) - - } - } - - case "vendorSeverity": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Severity to be of type string, got %T instead", value) - } - sv.VendorSeverity = ptr.String(jtv) - } - - case "vendorUpdatedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.VendorUpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) - - } - } - - case "vulnerabilityId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected VulnerabilityId to be of type string, got %T instead", value) - } - sv.VulnerabilityId = ptr.String(jtv) - } - - case "vulnerablePackages": - if err := awsAwsjson11_deserializeDocumentVulnerablePackagesList(&sv.VulnerablePackages, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentPullThroughCacheRule(v **types.PullThroughCacheRule, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.PullThroughCacheRule - if *v == nil { - sv = &types.PullThroughCacheRule{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "createdAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "credentialArn": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected CredentialArn to be of type string, got %T instead", value) - } - sv.CredentialArn = ptr.String(jtv) - } - - case "ecrRepositoryPrefix": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected PullThroughCacheRuleRepositoryPrefix to be of type string, got %T instead", value) - } - sv.EcrRepositoryPrefix = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "updatedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected UpdatedTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "upstreamRegistry": - if value != nil { - jtv, ok := value.(string) - if !ok { - return 
fmt.Errorf("expected UpstreamRegistry to be of type string, got %T instead", value) - } - sv.UpstreamRegistry = types.UpstreamRegistry(jtv) - } - - case "upstreamRegistryUrl": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) - } - sv.UpstreamRegistryUrl = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentPullThroughCacheRuleAlreadyExistsException(v **types.PullThroughCacheRuleAlreadyExistsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.PullThroughCacheRuleAlreadyExistsException - if *v == nil { - sv = &types.PullThroughCacheRuleAlreadyExistsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentPullThroughCacheRuleList(v *[]types.PullThroughCacheRule, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.PullThroughCacheRule - if *v == nil { - cv = []types.PullThroughCacheRule{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.PullThroughCacheRule - destAddr := &col - if err := awsAwsjson11_deserializeDocumentPullThroughCacheRule(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentPullThroughCacheRuleNotFoundException(v **types.PullThroughCacheRuleNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.PullThroughCacheRuleNotFoundException - if *v == nil { - sv = &types.PullThroughCacheRuleNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRecommendation(v **types.Recommendation, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Recommendation - if *v == nil { - sv = &types.Recommendation{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "text": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RecommendationText to be of type string, 
got %T instead", value) - } - sv.Text = ptr.String(jtv) - } - - case "url": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) - } - sv.Url = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentReferencedImagesNotFoundException(v **types.ReferencedImagesNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ReferencedImagesNotFoundException - if *v == nil { - sv = &types.ReferencedImagesNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentReferenceUrlsList(v *[]string, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []string - if *v == nil { - cv = []string{} - } else { - cv = *v - } - - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) - } - col = jtv - } - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentRegistryPolicyNotFoundException(v **types.RegistryPolicyNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RegistryPolicyNotFoundException - if *v == nil { - sv = &types.RegistryPolicyNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(v **types.RegistryScanningConfiguration, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RegistryScanningConfiguration - if *v == nil { - sv = &types.RegistryScanningConfiguration{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "rules": - if err := awsAwsjson11_deserializeDocumentRegistryScanningRuleList(&sv.Rules, value); err != nil { - return err - } - - case "scanType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScanType to be of type string, got %T instead", value) - } - sv.ScanType = types.ScanType(jtv) - } 
- - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRegistryScanningRule(v **types.RegistryScanningRule, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RegistryScanningRule - if *v == nil { - sv = &types.RegistryScanningRule{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "repositoryFilters": - if err := awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(&sv.RepositoryFilters, value); err != nil { - return err - } - - case "scanFrequency": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScanFrequency to be of type string, got %T instead", value) - } - sv.ScanFrequency = types.ScanFrequency(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRegistryScanningRuleList(v *[]types.RegistryScanningRule, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.RegistryScanningRule - if *v == nil { - cv = []types.RegistryScanningRule{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.RegistryScanningRule - destAddr := &col - if err := awsAwsjson11_deserializeDocumentRegistryScanningRule(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentRelatedVulnerabilitiesList(v *[]string, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []string - if *v == nil { - cv = []string{} - } else { - cv = *v - } - - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RelatedVulnerability to be of type string, got %T instead", value) - } - col = jtv - } - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentRemediation(v **types.Remediation, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Remediation - if *v == nil { - sv = &types.Remediation{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "recommendation": - if err := awsAwsjson11_deserializeDocumentRecommendation(&sv.Recommendation, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentReplicationConfiguration(v **types.ReplicationConfiguration, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv 
*types.ReplicationConfiguration - if *v == nil { - sv = &types.ReplicationConfiguration{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "rules": - if err := awsAwsjson11_deserializeDocumentReplicationRuleList(&sv.Rules, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentReplicationDestination(v **types.ReplicationDestination, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ReplicationDestination - if *v == nil { - sv = &types.ReplicationDestination{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "region": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Region to be of type string, got %T instead", value) - } - sv.Region = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentReplicationDestinationList(v *[]types.ReplicationDestination, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.ReplicationDestination - if *v == nil { - cv = []types.ReplicationDestination{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.ReplicationDestination - destAddr := &col - if err := awsAwsjson11_deserializeDocumentReplicationDestination(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentReplicationRule(v **types.ReplicationRule, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ReplicationRule - if *v == nil { - sv = &types.ReplicationRule{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "destinations": - if err := awsAwsjson11_deserializeDocumentReplicationDestinationList(&sv.Destinations, value); err != nil { - return err - } - - case "repositoryFilters": - if err := awsAwsjson11_deserializeDocumentRepositoryFilterList(&sv.RepositoryFilters, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentReplicationRuleList(v *[]types.ReplicationRule, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.ReplicationRule - if *v == nil { - cv = []types.ReplicationRule{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.ReplicationRule - destAddr := &col - if err := 
awsAwsjson11_deserializeDocumentReplicationRule(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentRepository(v **types.Repository, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Repository - if *v == nil { - sv = &types.Repository{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "createdAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "encryptionConfiguration": - if err := awsAwsjson11_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, value); err != nil { - return err - } - - case "imageScanningConfiguration": - if err := awsAwsjson11_deserializeDocumentImageScanningConfiguration(&sv.ImageScanningConfiguration, value); err != nil { - return err - } - - case "imageTagMutability": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageTagMutability to be of type string, got %T instead", value) - } - sv.ImageTagMutability = types.ImageTagMutability(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryArn": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Arn to be of type string, got %T instead", value) - } - sv.RepositoryArn = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - case "repositoryUri": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) - } - sv.RepositoryUri = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryAlreadyExistsException(v **types.RepositoryAlreadyExistsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryAlreadyExistsException - if *v == nil { - sv = &types.RepositoryAlreadyExistsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryFilter(v **types.RepositoryFilter, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return 
nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryFilter - if *v == nil { - sv = &types.RepositoryFilter{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "filter": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryFilterValue to be of type string, got %T instead", value) - } - sv.Filter = ptr.String(jtv) - } - - case "filterType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryFilterType to be of type string, got %T instead", value) - } - sv.FilterType = types.RepositoryFilterType(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryFilterList(v *[]types.RepositoryFilter, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.RepositoryFilter - if *v == nil { - cv = []types.RepositoryFilter{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.RepositoryFilter - destAddr := &col - if err := awsAwsjson11_deserializeDocumentRepositoryFilter(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryList(v *[]types.Repository, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.Repository - if *v == nil { - cv = []types.Repository{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.Repository - destAddr := &col - if err := awsAwsjson11_deserializeDocumentRepository(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryNotEmptyException(v **types.RepositoryNotEmptyException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryNotEmptyException - if *v == nil { - sv = &types.RepositoryNotEmptyException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryNotFoundException(v **types.RepositoryNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryNotFoundException - if *v == nil { - sv = &types.RepositoryNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - 
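The `*List` helpers in this stretch (RepositoryFilterList, RepositoryList, and their siblings) all use the same element-by-element pattern: deserialize into a pointer to a stack-local value (`destAddr := &col`), then append the value itself, so the resulting slice holds plain structs rather than pointers. A self-contained sketch of that pattern, with hypothetical `item`/`deserializeItem` names in place of the generated types:

```go
package main

import "fmt"

type item struct {
	Name string
}

func deserializeItem(v **item, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}
	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}
	sv := *v
	if sv == nil {
		sv = &item{}
	}
	if name, ok := shape["name"].(string); ok {
		sv.Name = name
	}
	*v = sv
	return nil
}

func deserializeItemList(v *[]item, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}
	shape, ok := value.([]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}
	cv := *v
	if cv == nil {
		cv = []item{}
	}
	for _, elem := range shape {
		var col item
		destAddr := &col // reuse the **T element deserializer
		if err := deserializeItem(&destAddr, elem); err != nil {
			return err
		}
		cv = append(cv, *destAddr)
	}
	*v = cv
	return nil
}

func main() {
	raw := []interface{}{
		map[string]interface{}{"name": "prod-*"},
		map[string]interface{}{"name": "staging-*"},
	}
	var filters []item
	if err := deserializeItemList(&filters, raw); err == nil {
		fmt.Println(filters) // [{prod-*} {staging-*}]
	}
}
```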
switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryPolicyNotFoundException(v **types.RepositoryPolicyNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryPolicyNotFoundException - if *v == nil { - sv = &types.RepositoryPolicyNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryScanningConfiguration(v **types.RepositoryScanningConfiguration, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryScanningConfiguration - if *v == nil { - sv = &types.RepositoryScanningConfiguration{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "appliedScanFilters": - if err := awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(&sv.AppliedScanFilters, value); err != nil { - return err - } - - case "repositoryArn": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Arn to be of type string, got %T instead", value) - } - sv.RepositoryArn = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - case "scanFrequency": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScanFrequency to be of type string, got %T instead", value) - } - sv.ScanFrequency = types.ScanFrequency(jtv) - } - - case "scanOnPush": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected ScanOnPushFlag to be of type *bool, got %T instead", value) - } - sv.ScanOnPush = jtv - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailure(v **types.RepositoryScanningConfigurationFailure, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryScanningConfigurationFailure - if *v == nil { - sv = &types.RepositoryScanningConfigurationFailure{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "failureCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScanningConfigurationFailureCode to be of type string, got %T 
instead", value) - } - sv.FailureCode = types.ScanningConfigurationFailureCode(jtv) - } - - case "failureReason": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScanningConfigurationFailureReason to be of type string, got %T instead", value) - } - sv.FailureReason = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailureList(v *[]types.RepositoryScanningConfigurationFailure, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.RepositoryScanningConfigurationFailure - if *v == nil { - cv = []types.RepositoryScanningConfigurationFailure{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.RepositoryScanningConfigurationFailure - destAddr := &col - if err := awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailure(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationList(v *[]types.RepositoryScanningConfiguration, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.RepositoryScanningConfiguration - if *v == nil { - cv = []types.RepositoryScanningConfiguration{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.RepositoryScanningConfiguration - destAddr := &col - if err := awsAwsjson11_deserializeDocumentRepositoryScanningConfiguration(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentResource(v **types.Resource, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Resource - if *v == nil { - sv = &types.Resource{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "details": - if err := awsAwsjson11_deserializeDocumentResourceDetails(&sv.Details, value); err != nil { - return err - } - - case "id": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ResourceId to be of type string, got %T instead", value) - } - sv.Id = ptr.String(jtv) - } - - case "tags": - if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil { - return err - } - - case "type": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Type to be of type string, got %T instead", value) - } - sv.Type = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentResourceDetails(v **types.ResourceDetails, value interface{}) error { - if 
v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ResourceDetails - if *v == nil { - sv = &types.ResourceDetails{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "awsEcrContainerImage": - if err := awsAwsjson11_deserializeDocumentAwsEcrContainerImageDetails(&sv.AwsEcrContainerImage, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentResourceList(v *[]types.Resource, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.Resource - if *v == nil { - cv = []types.Resource{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.Resource - destAddr := &col - if err := awsAwsjson11_deserializeDocumentResource(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentScanningRepositoryFilter(v **types.ScanningRepositoryFilter, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ScanningRepositoryFilter - if *v == nil { - sv = &types.ScanningRepositoryFilter{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "filter": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScanningRepositoryFilterValue to be of type string, got %T instead", value) - } - sv.Filter = ptr.String(jtv) - } - - case "filterType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScanningRepositoryFilterType to be of type string, got %T instead", value) - } - sv.FilterType = types.ScanningRepositoryFilterType(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(v *[]types.ScanningRepositoryFilter, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.ScanningRepositoryFilter - if *v == nil { - cv = []types.ScanningRepositoryFilter{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.ScanningRepositoryFilter - destAddr := &col - if err := awsAwsjson11_deserializeDocumentScanningRepositoryFilter(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentScanNotFoundException(v **types.ScanNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ScanNotFoundException - if *v == nil { - sv = 
&types.ScanNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentScoreDetails(v **types.ScoreDetails, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ScoreDetails - if *v == nil { - sv = &types.ScoreDetails{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "cvss": - if err := awsAwsjson11_deserializeDocumentCvssScoreDetails(&sv.Cvss, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentSecretNotFoundException(v **types.SecretNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.SecretNotFoundException - if *v == nil { - sv = &types.SecretNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentServerException(v **types.ServerException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ServerException - if *v == nil { - sv = &types.ServerException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentTag(v **types.Tag, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Tag - if *v == nil { - sv = &types.Tag{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "Key": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TagKey to be of type string, got %T instead", value) - } - sv.Key = ptr.String(jtv) - } - - case "Value": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) - } - sv.Value = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func 
awsAwsjson11_deserializeDocumentTagList(v *[]types.Tag, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.Tag - if *v == nil { - cv = []types.Tag{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.Tag - destAddr := &col - if err := awsAwsjson11_deserializeDocumentTag(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentTags(v *map[string]string, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var mv map[string]string - if *v == nil { - mv = map[string]string{} - } else { - mv = *v - } - - for key, value := range shape { - var parsedVal string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) - } - parsedVal = jtv - } - mv[key] = parsedVal - - } - *v = mv - return nil -} - -func awsAwsjson11_deserializeDocumentTooManyTagsException(v **types.TooManyTagsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.TooManyTagsException - if *v == nil { - sv = &types.TooManyTagsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentUnableToAccessSecretException(v **types.UnableToAccessSecretException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnableToAccessSecretException - if *v == nil { - sv = &types.UnableToAccessSecretException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentUnableToDecryptSecretValueException(v **types.UnableToDecryptSecretValueException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnableToDecryptSecretValueException - if *v == nil { - sv = &types.UnableToDecryptSecretValueException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if 
value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentUnableToGetUpstreamImageException(v **types.UnableToGetUpstreamImageException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnableToGetUpstreamImageException - if *v == nil { - sv = &types.UnableToGetUpstreamImageException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentUnableToGetUpstreamLayerException(v **types.UnableToGetUpstreamLayerException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnableToGetUpstreamLayerException - if *v == nil { - sv = &types.UnableToGetUpstreamLayerException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentUnsupportedImageTypeException(v **types.UnsupportedImageTypeException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnsupportedImageTypeException - if *v == nil { - sv = &types.UnsupportedImageTypeException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentUnsupportedUpstreamRegistryException(v **types.UnsupportedUpstreamRegistryException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnsupportedUpstreamRegistryException - if *v == nil { - sv = &types.UnsupportedUpstreamRegistryException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = 
ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentUploadNotFoundException(v **types.UploadNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UploadNotFoundException - if *v == nil { - sv = &types.UploadNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentValidationException(v **types.ValidationException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ValidationException - if *v == nil { - sv = &types.ValidationException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentVulnerablePackage(v **types.VulnerablePackage, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.VulnerablePackage - if *v == nil { - sv = &types.VulnerablePackage{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "arch": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Arch to be of type string, got %T instead", value) - } - sv.Arch = ptr.String(jtv) - } - - case "epoch": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Epoch to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.Epoch = ptr.Int32(int32(i64)) - } - - case "filePath": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected FilePath to be of type string, got %T instead", value) - } - sv.FilePath = ptr.String(jtv) - } - - case "name": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected VulnerablePackageName to be of type string, got %T instead", value) - } - sv.Name = ptr.String(jtv) - } - - case "packageManager": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected PackageManager to be of type string, got %T instead", value) - } - sv.PackageManager = ptr.String(jtv) - } - - case "release": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Release to be of type string, got %T instead", value) - } - sv.Release = ptr.String(jtv) - } - - case "sourceLayerHash": - if value != nil { - jtv, ok := 
value.(string) - if !ok { - return fmt.Errorf("expected SourceLayerHash to be of type string, got %T instead", value) - } - sv.SourceLayerHash = ptr.String(jtv) - } - - case "version": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Version to be of type string, got %T instead", value) - } - sv.Version = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentVulnerablePackagesList(v *[]types.VulnerablePackage, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.VulnerablePackage - if *v == nil { - cv = []types.VulnerablePackage{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.VulnerablePackage - destAddr := &col - if err := awsAwsjson11_deserializeDocumentVulnerablePackage(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeOpDocumentBatchCheckLayerAvailabilityOutput(v **BatchCheckLayerAvailabilityOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *BatchCheckLayerAvailabilityOutput - if *v == nil { - sv = &BatchCheckLayerAvailabilityOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "failures": - if err := awsAwsjson11_deserializeDocumentLayerFailureList(&sv.Failures, value); err != nil { - return err - } - - case "layers": - if err := awsAwsjson11_deserializeDocumentLayerList(&sv.Layers, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentBatchDeleteImageOutput(v **BatchDeleteImageOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *BatchDeleteImageOutput - if *v == nil { - sv = &BatchDeleteImageOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "failures": - if err := awsAwsjson11_deserializeDocumentImageFailureList(&sv.Failures, value); err != nil { - return err - } - - case "imageIds": - if err := awsAwsjson11_deserializeDocumentImageIdentifierList(&sv.ImageIds, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentBatchGetImageOutput(v **BatchGetImageOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *BatchGetImageOutput - if *v == nil { - sv = &BatchGetImageOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "failures": - if err := awsAwsjson11_deserializeDocumentImageFailureList(&sv.Failures, value); err != nil { - return err - } - - case "images": - if err := 
awsAwsjson11_deserializeDocumentImageList(&sv.Images, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentBatchGetRepositoryScanningConfigurationOutput(v **BatchGetRepositoryScanningConfigurationOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *BatchGetRepositoryScanningConfigurationOutput - if *v == nil { - sv = &BatchGetRepositoryScanningConfigurationOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "failures": - if err := awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailureList(&sv.Failures, value); err != nil { - return err - } - - case "scanningConfigurations": - if err := awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationList(&sv.ScanningConfigurations, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentCompleteLayerUploadOutput(v **CompleteLayerUploadOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *CompleteLayerUploadOutput - if *v == nil { - sv = &CompleteLayerUploadOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "layerDigest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LayerDigest to be of type string, got %T instead", value) - } - sv.LayerDigest = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - case "uploadId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) - } - sv.UploadId = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentCreatePullThroughCacheRuleOutput(v **CreatePullThroughCacheRuleOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *CreatePullThroughCacheRuleOutput - if *v == nil { - sv = &CreatePullThroughCacheRuleOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "createdAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "credentialArn": - if value != nil { - jtv, ok := 
value.(string) - if !ok { - return fmt.Errorf("expected CredentialArn to be of type string, got %T instead", value) - } - sv.CredentialArn = ptr.String(jtv) - } - - case "ecrRepositoryPrefix": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected PullThroughCacheRuleRepositoryPrefix to be of type string, got %T instead", value) - } - sv.EcrRepositoryPrefix = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "upstreamRegistry": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UpstreamRegistry to be of type string, got %T instead", value) - } - sv.UpstreamRegistry = types.UpstreamRegistry(jtv) - } - - case "upstreamRegistryUrl": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) - } - sv.UpstreamRegistryUrl = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentCreateRepositoryOutput(v **CreateRepositoryOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *CreateRepositoryOutput - if *v == nil { - sv = &CreateRepositoryOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "repository": - if err := awsAwsjson11_deserializeDocumentRepository(&sv.Repository, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDeleteLifecyclePolicyOutput(v **DeleteLifecyclePolicyOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DeleteLifecyclePolicyOutput - if *v == nil { - sv = &DeleteLifecyclePolicyOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "lastEvaluatedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.LastEvaluatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected EvaluationTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "lifecyclePolicyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LifecyclePolicyText to be of type string, got %T instead", value) - } - sv.LifecyclePolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDeletePullThroughCacheRuleOutput(v 
**DeletePullThroughCacheRuleOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DeletePullThroughCacheRuleOutput - if *v == nil { - sv = &DeletePullThroughCacheRuleOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "createdAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "credentialArn": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected CredentialArn to be of type string, got %T instead", value) - } - sv.CredentialArn = ptr.String(jtv) - } - - case "ecrRepositoryPrefix": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected PullThroughCacheRuleRepositoryPrefix to be of type string, got %T instead", value) - } - sv.EcrRepositoryPrefix = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "upstreamRegistryUrl": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) - } - sv.UpstreamRegistryUrl = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDeleteRegistryPolicyOutput(v **DeleteRegistryPolicyOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DeleteRegistryPolicyOutput - if *v == nil { - sv = &DeleteRegistryPolicyOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "policyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryPolicyText to be of type string, got %T instead", value) - } - sv.PolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDeleteRepositoryOutput(v **DeleteRepositoryOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DeleteRepositoryOutput - if *v == nil { - sv = &DeleteRepositoryOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "repository": - if err := awsAwsjson11_deserializeDocumentRepository(&sv.Repository, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func 
awsAwsjson11_deserializeOpDocumentDeleteRepositoryPolicyOutput(v **DeleteRepositoryPolicyOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DeleteRepositoryPolicyOutput - if *v == nil { - sv = &DeleteRepositoryPolicyOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "policyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) - } - sv.PolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDescribeImageReplicationStatusOutput(v **DescribeImageReplicationStatusOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DescribeImageReplicationStatusOutput - if *v == nil { - sv = &DescribeImageReplicationStatusOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageId": - if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil { - return err - } - - case "replicationStatuses": - if err := awsAwsjson11_deserializeDocumentImageReplicationStatusList(&sv.ReplicationStatuses, value); err != nil { - return err - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDescribeImageScanFindingsOutput(v **DescribeImageScanFindingsOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DescribeImageScanFindingsOutput - if *v == nil { - sv = &DescribeImageScanFindingsOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageId": - if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil { - return err - } - - case "imageScanFindings": - if err := awsAwsjson11_deserializeDocumentImageScanFindings(&sv.ImageScanFindings, value); err != nil { - return err - } - - case "imageScanStatus": - if err := awsAwsjson11_deserializeDocumentImageScanStatus(&sv.ImageScanStatus, value); err != nil { - return err - } - - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) - } - sv.NextToken = 
ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDescribeImagesOutput(v **DescribeImagesOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DescribeImagesOutput - if *v == nil { - sv = &DescribeImagesOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageDetails": - if err := awsAwsjson11_deserializeDocumentImageDetailList(&sv.ImageDetails, value); err != nil { - return err - } - - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDescribePullThroughCacheRulesOutput(v **DescribePullThroughCacheRulesOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DescribePullThroughCacheRulesOutput - if *v == nil { - sv = &DescribePullThroughCacheRulesOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - case "pullThroughCacheRules": - if err := awsAwsjson11_deserializeDocumentPullThroughCacheRuleList(&sv.PullThroughCacheRules, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDescribeRegistryOutput(v **DescribeRegistryOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DescribeRegistryOutput - if *v == nil { - sv = &DescribeRegistryOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "replicationConfiguration": - if err := awsAwsjson11_deserializeDocumentReplicationConfiguration(&sv.ReplicationConfiguration, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDescribeRepositoriesOutput(v **DescribeRepositoriesOutput, value interface{}) error { - if v == nil { - return 
fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DescribeRepositoriesOutput - if *v == nil { - sv = &DescribeRepositoriesOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - case "repositories": - if err := awsAwsjson11_deserializeDocumentRepositoryList(&sv.Repositories, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentGetAuthorizationTokenOutput(v **GetAuthorizationTokenOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetAuthorizationTokenOutput - if *v == nil { - sv = &GetAuthorizationTokenOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "authorizationData": - if err := awsAwsjson11_deserializeDocumentAuthorizationDataList(&sv.AuthorizationData, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentGetDownloadUrlForLayerOutput(v **GetDownloadUrlForLayerOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetDownloadUrlForLayerOutput - if *v == nil { - sv = &GetDownloadUrlForLayerOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "downloadUrl": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) - } - sv.DownloadUrl = ptr.String(jtv) - } - - case "layerDigest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LayerDigest to be of type string, got %T instead", value) - } - sv.LayerDigest = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentGetLifecyclePolicyOutput(v **GetLifecyclePolicyOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetLifecyclePolicyOutput - if *v == nil { - sv = &GetLifecyclePolicyOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "lastEvaluatedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.LastEvaluatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected EvaluationTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "lifecyclePolicyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LifecyclePolicyText 
to be of type string, got %T instead", value) - } - sv.LifecyclePolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentGetLifecyclePolicyPreviewOutput(v **GetLifecyclePolicyPreviewOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetLifecyclePolicyPreviewOutput - if *v == nil { - sv = &GetLifecyclePolicyPreviewOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "lifecyclePolicyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LifecyclePolicyText to be of type string, got %T instead", value) - } - sv.LifecyclePolicyText = ptr.String(jtv) - } - - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - case "previewResults": - if err := awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewResultList(&sv.PreviewResults, value); err != nil { - return err - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - case "status": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LifecyclePolicyPreviewStatus to be of type string, got %T instead", value) - } - sv.Status = types.LifecyclePolicyPreviewStatus(jtv) - } - - case "summary": - if err := awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewSummary(&sv.Summary, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentGetRegistryPolicyOutput(v **GetRegistryPolicyOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetRegistryPolicyOutput - if *v == nil { - sv = &GetRegistryPolicyOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "policyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryPolicyText to be of type string, got %T instead", value) - } - sv.PolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - 
sv.RegistryId = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentGetRegistryScanningConfigurationOutput(v **GetRegistryScanningConfigurationOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetRegistryScanningConfigurationOutput - if *v == nil { - sv = &GetRegistryScanningConfigurationOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "scanningConfiguration": - if err := awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(&sv.ScanningConfiguration, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(v **GetRepositoryPolicyOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetRepositoryPolicyOutput - if *v == nil { - sv = &GetRepositoryPolicyOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "policyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) - } - sv.PolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(v **InitiateLayerUploadOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *InitiateLayerUploadOutput - if *v == nil { - sv = &InitiateLayerUploadOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "partSize": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.PartSize = ptr.Int64(i64) - } - - case "uploadId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) - } - sv.UploadId = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentListImagesOutput(v **ListImagesOutput, value interface{}) error { - if v == nil { - 
return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *ListImagesOutput - if *v == nil { - sv = &ListImagesOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageIds": - if err := awsAwsjson11_deserializeDocumentImageIdentifierList(&sv.ImageIds, value); err != nil { - return err - } - - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *ListTagsForResourceOutput - if *v == nil { - sv = &ListTagsForResourceOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "tags": - if err := awsAwsjson11_deserializeDocumentTagList(&sv.Tags, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentPutImageOutput(v **PutImageOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *PutImageOutput - if *v == nil { - sv = &PutImageOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "image": - if err := awsAwsjson11_deserializeDocumentImage(&sv.Image, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentPutImageScanningConfigurationOutput(v **PutImageScanningConfigurationOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *PutImageScanningConfigurationOutput - if *v == nil { - sv = &PutImageScanningConfigurationOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageScanningConfiguration": - if err := awsAwsjson11_deserializeDocumentImageScanningConfiguration(&sv.ImageScanningConfiguration, value); err != nil { - return err - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentPutImageTagMutabilityOutput(v **PutImageTagMutabilityOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil 
of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *PutImageTagMutabilityOutput - if *v == nil { - sv = &PutImageTagMutabilityOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageTagMutability": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageTagMutability to be of type string, got %T instead", value) - } - sv.ImageTagMutability = types.ImageTagMutability(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentPutLifecyclePolicyOutput(v **PutLifecyclePolicyOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *PutLifecyclePolicyOutput - if *v == nil { - sv = &PutLifecyclePolicyOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "lifecyclePolicyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LifecyclePolicyText to be of type string, got %T instead", value) - } - sv.LifecyclePolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentPutRegistryPolicyOutput(v **PutRegistryPolicyOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *PutRegistryPolicyOutput - if *v == nil { - sv = &PutRegistryPolicyOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "policyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryPolicyText to be of type string, got %T instead", value) - } - sv.PolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentPutRegistryScanningConfigurationOutput(v **PutRegistryScanningConfigurationOutput, value interface{}) error { - if v == nil { - return 
fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *PutRegistryScanningConfigurationOutput - if *v == nil { - sv = &PutRegistryScanningConfigurationOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "registryScanningConfiguration": - if err := awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(&sv.RegistryScanningConfiguration, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentPutReplicationConfigurationOutput(v **PutReplicationConfigurationOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *PutReplicationConfigurationOutput - if *v == nil { - sv = &PutReplicationConfigurationOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "replicationConfiguration": - if err := awsAwsjson11_deserializeDocumentReplicationConfiguration(&sv.ReplicationConfiguration, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentSetRepositoryPolicyOutput(v **SetRepositoryPolicyOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *SetRepositoryPolicyOutput - if *v == nil { - sv = &SetRepositoryPolicyOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "policyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) - } - sv.PolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentStartImageScanOutput(v **StartImageScanOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *StartImageScanOutput - if *v == nil { - sv = &StartImageScanOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageId": - if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil { - return err - } - - case "imageScanStatus": - if err := awsAwsjson11_deserializeDocumentImageScanStatus(&sv.ImageScanStatus, value); err != nil { - return err - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return 
fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentStartLifecyclePolicyPreviewOutput(v **StartLifecyclePolicyPreviewOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *StartLifecyclePolicyPreviewOutput - if *v == nil { - sv = &StartLifecyclePolicyPreviewOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "lifecyclePolicyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LifecyclePolicyText to be of type string, got %T instead", value) - } - sv.LifecyclePolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - case "status": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LifecyclePolicyPreviewStatus to be of type string, got %T instead", value) - } - sv.Status = types.LifecyclePolicyPreviewStatus(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentTagResourceOutput(v **TagResourceOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *TagResourceOutput - if *v == nil { - sv = &TagResourceOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentUntagResourceOutput(v **UntagResourceOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *UntagResourceOutput - if *v == nil { - sv = &UntagResourceOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentUpdatePullThroughCacheRuleOutput(v **UpdatePullThroughCacheRuleOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *UpdatePullThroughCacheRuleOutput - if *v == nil { - sv = &UpdatePullThroughCacheRuleOutput{} - } else { - sv = *v 
- } - - for key, value := range shape { - switch key { - case "credentialArn": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected CredentialArn to be of type string, got %T instead", value) - } - sv.CredentialArn = ptr.String(jtv) - } - - case "ecrRepositoryPrefix": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected PullThroughCacheRuleRepositoryPrefix to be of type string, got %T instead", value) - } - sv.EcrRepositoryPrefix = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "updatedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected UpdatedTimestamp to be a JSON Number, got %T instead", value) - - } - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentUploadLayerPartOutput(v **UploadLayerPartOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *UploadLayerPartOutput - if *v == nil { - sv = &UploadLayerPartOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "lastByteReceived": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.LastByteReceived = ptr.Int64(i64) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - case "uploadId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) - } - sv.UploadId = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentValidatePullThroughCacheRuleOutput(v **ValidatePullThroughCacheRuleOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *ValidatePullThroughCacheRuleOutput - if *v == nil { - sv = &ValidatePullThroughCacheRuleOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "credentialArn": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected CredentialArn to be of type string, got %T instead", value) - } - sv.CredentialArn = ptr.String(jtv) - } - - case "ecrRepositoryPrefix": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected 
PullThroughCacheRuleRepositoryPrefix to be of type string, got %T instead", value) - } - sv.EcrRepositoryPrefix = ptr.String(jtv) - } - - case "failure": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected PTCValidateFailure to be of type string, got %T instead", value) - } - sv.Failure = ptr.String(jtv) - } - - case "isValid": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected IsPTCRuleValid to be of type *bool, got %T instead", value) - } - sv.IsValid = jtv - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "upstreamRegistryUrl": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) - } - sv.UpstreamRegistryUrl = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type protocolErrorInfo struct { - Type string `json:"__type"` - Message string - Code any // nonstandard for awsjson but some services do present the type here -} - -func getProtocolErrorInfo(decoder *json.Decoder) (protocolErrorInfo, error) { - var errInfo protocolErrorInfo - if err := decoder.Decode(&errInfo); err != nil { - if err == io.EOF { - return errInfo, nil - } - return errInfo, err - } - - return errInfo, nil -} - -func resolveProtocolErrorType(headerType string, bodyInfo protocolErrorInfo) (string, bool) { - if len(headerType) != 0 { - return headerType, true - } else if len(bodyInfo.Type) != 0 { - return bodyInfo.Type, true - } else if code, ok := bodyInfo.Code.(string); ok && len(code) != 0 { - return code, true - } - return "", false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/doc.go deleted file mode 100644 index cd150610d4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -// Package ecr provides the API client, operations, and parameter types for Amazon -// EC2 Container Registry. -// -// Amazon Elastic Container Registry Amazon Elastic Container Registry (Amazon -// ECR) is a managed container image registry service. Customers can use the -// familiar Docker CLI, or their preferred client, to push, pull, and manage -// images. Amazon ECR provides a secure, scalable, and reliable registry for your -// Docker or Open Container Initiative (OCI) images. Amazon ECR supports private -// repositories with resource-based permissions using IAM so that specific users or -// Amazon EC2 instances can access repositories and images. Amazon ECR has service -// endpoints in each supported Region. For more information, see Amazon ECR -// endpoints (https://docs.aws.amazon.com/general/latest/gr/ecr.html) in the Amazon -// Web Services General Reference. -package ecr diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/endpoints.go deleted file mode 100644 index 0cf591df27..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/endpoints.go +++ /dev/null @@ -1,554 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ecr - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom.You can provide functional options to configure endpoint -// values for the resolved endpoint. 
-func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "ecr" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
-// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_ECR") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "ECR", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. - // - // AWS::UseDualStack - UseDualStack *bool - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string -} - -// ValidateRequired validates required parameters are set. 
-func (p EndpointParameters) ValidateRequired() error { - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. -func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - return p -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseDualStack := *params.UseDualStack - _UseFIPS := *params.UseFIPS - - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") - } - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") - } - uriString := _Endpoint - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == true { - if _UseDualStack == true { - if true == _PartitionResult.SupportsFIPS { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://api.ecr-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") - } - } - if _UseFIPS == true { - if _PartitionResult.SupportsFIPS == true { - if _PartitionResult.Name == "aws" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://ecr-fips.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return 
out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _PartitionResult.Name == "aws-us-gov" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://ecr-fips.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://api.ecr-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") - } - if _UseDualStack == true { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://api.ecr.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://api.ecr.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/generated.json deleted file mode 100644 index 2e7e94262b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/generated.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0", - "github.com/jmespath/go-jmespath": "v0.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_BatchCheckLayerAvailability.go", - "api_op_BatchDeleteImage.go", - "api_op_BatchGetImage.go", - "api_op_BatchGetRepositoryScanningConfiguration.go", - "api_op_CompleteLayerUpload.go", - "api_op_CreatePullThroughCacheRule.go", - "api_op_CreateRepository.go", - "api_op_DeleteLifecyclePolicy.go", - 
"api_op_DeletePullThroughCacheRule.go", - "api_op_DeleteRegistryPolicy.go", - "api_op_DeleteRepository.go", - "api_op_DeleteRepositoryPolicy.go", - "api_op_DescribeImageReplicationStatus.go", - "api_op_DescribeImageScanFindings.go", - "api_op_DescribeImages.go", - "api_op_DescribePullThroughCacheRules.go", - "api_op_DescribeRegistry.go", - "api_op_DescribeRepositories.go", - "api_op_GetAuthorizationToken.go", - "api_op_GetDownloadUrlForLayer.go", - "api_op_GetLifecyclePolicy.go", - "api_op_GetLifecyclePolicyPreview.go", - "api_op_GetRegistryPolicy.go", - "api_op_GetRegistryScanningConfiguration.go", - "api_op_GetRepositoryPolicy.go", - "api_op_InitiateLayerUpload.go", - "api_op_ListImages.go", - "api_op_ListTagsForResource.go", - "api_op_PutImage.go", - "api_op_PutImageScanningConfiguration.go", - "api_op_PutImageTagMutability.go", - "api_op_PutLifecyclePolicy.go", - "api_op_PutRegistryPolicy.go", - "api_op_PutRegistryScanningConfiguration.go", - "api_op_PutReplicationConfiguration.go", - "api_op_SetRepositoryPolicy.go", - "api_op_StartImageScan.go", - "api_op_StartLifecyclePolicyPreview.go", - "api_op_TagResource.go", - "api_op_UntagResource.go", - "api_op_UpdatePullThroughCacheRule.go", - "api_op_UploadLayerPart.go", - "api_op_ValidatePullThroughCacheRule.go", - "auth.go", - "deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - "protocol_test.go", - "serializers.go", - "snapshot_test.go", - "types/enums.go", - "types/errors.go", - "types/types.go", - "validators.go" - ], - "go": "1.15", - "module": "github.com/aws/aws-sdk-go-v2/service/ecr", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go deleted file mode 100644 index 8dde998fa9..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package ecr - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.27.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go deleted file mode 100644 index 48d24a1e80..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go +++ /dev/null @@ -1,864 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package endpoints - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. 
You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. - UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. - UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver ECR endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "api.ecr.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "api.ecr-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "af-south-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.af-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "af-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-east-1", - }: endpoints.Endpoint{ - Hostname: 
"api.ecr.ap-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.ap-northeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - }: endpoints.Endpoint{ - Hostname: "api.ecr.ap-northeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - }: endpoints.Endpoint{ - Hostname: "api.ecr.ap-northeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-south-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.ap-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-south-2", - }: endpoints.Endpoint{ - Hostname: "api.ecr.ap-south-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-south-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.ap-southeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - }: endpoints.Endpoint{ - Hostname: "api.ecr.ap-southeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - }: endpoints.Endpoint{ - Hostname: "api.ecr.ap-southeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - }: endpoints.Endpoint{ - Hostname: "api.ecr.ap-southeast-4.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-4", - }, - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.ca-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "ca-west-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.ca-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "dkr-us-east-1", - }: endpoints.Endpoint{ - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "dkr-us-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "dkr-us-east-2", - }: endpoints.Endpoint{ - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "dkr-us-east-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "dkr-us-west-1", - }: endpoints.Endpoint{ - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "dkr-us-west-1", - Variant: 
endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "dkr-us-west-2", - }: endpoints.Endpoint{ - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "dkr-us-west-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "eu-central-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.eu-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-2", - }: endpoints.Endpoint{ - Hostname: "api.ecr.eu-central-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-north-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.eu-north-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-south-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.eu-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-south-2", - }: endpoints.Endpoint{ - Hostname: "api.ecr.eu-south-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-south-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.eu-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{ - Hostname: "api.ecr.eu-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-3", - }: endpoints.Endpoint{ - Hostname: "api.ecr.eu-west-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-3", - }, - }, - endpoints.EndpointKey{ - Region: "fips-dkr-us-east-1", - }: endpoints.Endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-dkr-us-east-2", - }: endpoints.Endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-dkr-us-west-1", - }: endpoints.Endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-dkr-us-west-2", - }: endpoints.Endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-east-1", - }: endpoints.Endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-east-2", - }: endpoints.Endpoint{ - Hostname: 
"ecr-fips.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-west-1", - }: endpoints.Endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-west-2", - }: endpoints.Endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "il-central-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.il-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "il-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-central-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.me-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-south-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.me-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "sa-east-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.sa-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "sa-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - }: endpoints.Endpoint{ - Hostname: "api.ecr.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{ - Hostname: "api.ecr.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "api.ecr.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "api.ecr-fips.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 
endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "api.ecr-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "cn-north-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "api.ecr-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-iso-east-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", - CredentialScope: endpoints.CredentialScope{ - Region: "us-iso-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-iso-west-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.us-iso-west-1.c2s.ic.gov", - CredentialScope: endpoints.CredentialScope{ - Region: "us-iso-west-1", - }, - }, - }, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "api.ecr-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-isob-east-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: endpoints.CredentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - { - ID: "aws-iso-e", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "api.ecr-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "api.ecr-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - }, - { - ID: "aws-us-gov", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: 
endpoints.DualStackVariant, - }: { - Hostname: "api.ecr.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "api.ecr-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "dkr-us-gov-east-1", - }: endpoints.Endpoint{ - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "dkr-us-gov-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "dkr-us-gov-west-1", - }: endpoints.Endpoint{ - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "dkr-us-gov-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-dkr-us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-dkr-us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-gov-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - }, - 
}, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/options.go deleted file mode 100644 index 0de783e381..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/options.go +++ /dev/null @@ -1,217 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecr - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. 
When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. - AuthSchemes []smithyhttp.AuthScheme -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. 
The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/serializers.go deleted file mode 100644 index fbeb274d4e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/serializers.go +++ /dev/null @@ -1,3729 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ecr - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - smithyjson "github.com/aws/smithy-go/encoding/json" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "path" -) - -type awsAwsjson11_serializeOpBatchCheckLayerAvailability struct { -} - -func (*awsAwsjson11_serializeOpBatchCheckLayerAvailability) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpBatchCheckLayerAvailability) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*BatchCheckLayerAvailabilityInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.BatchCheckLayerAvailability") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentBatchCheckLayerAvailabilityInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpBatchDeleteImage struct { -} - -func (*awsAwsjson11_serializeOpBatchDeleteImage) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpBatchDeleteImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*BatchDeleteImageInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, 
operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.BatchDeleteImage") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentBatchDeleteImageInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpBatchGetImage struct { -} - -func (*awsAwsjson11_serializeOpBatchGetImage) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpBatchGetImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*BatchGetImageInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.BatchGetImage") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentBatchGetImageInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpBatchGetRepositoryScanningConfiguration struct { -} - -func (*awsAwsjson11_serializeOpBatchGetRepositoryScanningConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpBatchGetRepositoryScanningConfiguration) HandleSerialize(ctx 
context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*BatchGetRepositoryScanningConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.BatchGetRepositoryScanningConfiguration") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentBatchGetRepositoryScanningConfigurationInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpCompleteLayerUpload struct { -} - -func (*awsAwsjson11_serializeOpCompleteLayerUpload) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpCompleteLayerUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CompleteLayerUploadInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.CompleteLayerUpload") - - jsonEncoder := smithyjson.NewEncoder() - if err := 
awsAwsjson11_serializeOpDocumentCompleteLayerUploadInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpCreatePullThroughCacheRule struct { -} - -func (*awsAwsjson11_serializeOpCreatePullThroughCacheRule) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpCreatePullThroughCacheRule) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreatePullThroughCacheRuleInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.CreatePullThroughCacheRule") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentCreatePullThroughCacheRuleInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpCreateRepository struct { -} - -func (*awsAwsjson11_serializeOpCreateRepository) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpCreateRepository) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateRepositoryInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - 
request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.CreateRepository") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentCreateRepositoryInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpDeleteLifecyclePolicy struct { -} - -func (*awsAwsjson11_serializeOpDeleteLifecyclePolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpDeleteLifecyclePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteLifecyclePolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DeleteLifecyclePolicy") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentDeleteLifecyclePolicyInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpDeletePullThroughCacheRule struct { -} - -func (*awsAwsjson11_serializeOpDeletePullThroughCacheRule) ID() 
string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpDeletePullThroughCacheRule) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeletePullThroughCacheRuleInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DeletePullThroughCacheRule") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentDeletePullThroughCacheRuleInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpDeleteRegistryPolicy struct { -} - -func (*awsAwsjson11_serializeOpDeleteRegistryPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpDeleteRegistryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteRegistryPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DeleteRegistryPolicy") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentDeleteRegistryPolicyInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpDeleteRepository struct { -} - -func (*awsAwsjson11_serializeOpDeleteRepository) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpDeleteRepository) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteRepositoryInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DeleteRepository") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentDeleteRepositoryInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpDeleteRepositoryPolicy struct { -} - -func (*awsAwsjson11_serializeOpDeleteRepositoryPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpDeleteRepositoryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteRepositoryPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DeleteRepositoryPolicy") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentDeleteRepositoryPolicyInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpDescribeImageReplicationStatus struct { -} - -func (*awsAwsjson11_serializeOpDescribeImageReplicationStatus) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpDescribeImageReplicationStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DescribeImageReplicationStatusInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribeImageReplicationStatus") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentDescribeImageReplicationStatusInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - 
in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpDescribeImages struct { -} - -func (*awsAwsjson11_serializeOpDescribeImages) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpDescribeImages) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DescribeImagesInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribeImages") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentDescribeImagesInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpDescribeImageScanFindings struct { -} - -func (*awsAwsjson11_serializeOpDescribeImageScanFindings) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpDescribeImageScanFindings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DescribeImageScanFindingsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribeImageScanFindings") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentDescribeImageScanFindingsInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpDescribePullThroughCacheRules struct { -} - -func (*awsAwsjson11_serializeOpDescribePullThroughCacheRules) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpDescribePullThroughCacheRules) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DescribePullThroughCacheRulesInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribePullThroughCacheRules") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentDescribePullThroughCacheRulesInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpDescribeRegistry struct { -} - -func (*awsAwsjson11_serializeOpDescribeRegistry) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpDescribeRegistry) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", 
in.Request)} - } - - input, ok := in.Parameters.(*DescribeRegistryInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribeRegistry") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentDescribeRegistryInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpDescribeRepositories struct { -} - -func (*awsAwsjson11_serializeOpDescribeRepositories) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpDescribeRepositories) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DescribeRepositoriesInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribeRepositories") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentDescribeRepositoriesInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != 
nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpGetAuthorizationToken struct { -} - -func (*awsAwsjson11_serializeOpGetAuthorizationToken) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpGetAuthorizationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetAuthorizationTokenInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetAuthorizationToken") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentGetAuthorizationTokenInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpGetDownloadUrlForLayer struct { -} - -func (*awsAwsjson11_serializeOpGetDownloadUrlForLayer) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpGetDownloadUrlForLayer) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetDownloadUrlForLayerInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if 
err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetDownloadUrlForLayer") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentGetDownloadUrlForLayerInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpGetLifecyclePolicy struct { -} - -func (*awsAwsjson11_serializeOpGetLifecyclePolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpGetLifecyclePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetLifecyclePolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetLifecyclePolicy") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentGetLifecyclePolicyInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpGetLifecyclePolicyPreview struct { -} - -func (*awsAwsjson11_serializeOpGetLifecyclePolicyPreview) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpGetLifecyclePolicyPreview) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetLifecyclePolicyPreviewInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetLifecyclePolicyPreview") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentGetLifecyclePolicyPreviewInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpGetRegistryPolicy struct { -} - -func (*awsAwsjson11_serializeOpGetRegistryPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpGetRegistryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetRegistryPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetRegistryPolicy") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentGetRegistryPolicyInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = 
httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpGetRegistryScanningConfiguration struct { -} - -func (*awsAwsjson11_serializeOpGetRegistryScanningConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpGetRegistryScanningConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetRegistryScanningConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetRegistryScanningConfiguration") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentGetRegistryScanningConfigurationInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpGetRepositoryPolicy struct { -} - -func (*awsAwsjson11_serializeOpGetRepositoryPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpGetRepositoryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetRepositoryPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - 
httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetRepositoryPolicy") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentGetRepositoryPolicyInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpInitiateLayerUpload struct { -} - -func (*awsAwsjson11_serializeOpInitiateLayerUpload) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpInitiateLayerUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*InitiateLayerUploadInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.InitiateLayerUpload") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentInitiateLayerUploadInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpListImages struct { -} - -func (*awsAwsjson11_serializeOpListImages) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpListImages) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - 
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListImagesInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.ListImages") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentListImagesInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpListTagsForResource struct { -} - -func (*awsAwsjson11_serializeOpListTagsForResource) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListTagsForResourceInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.ListTagsForResource") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentListTagsForResourceInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if 
request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpPutImage struct { -} - -func (*awsAwsjson11_serializeOpPutImage) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpPutImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutImageInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutImage") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutImageInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpPutImageScanningConfiguration struct { -} - -func (*awsAwsjson11_serializeOpPutImageScanningConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpPutImageScanningConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutImageScanningConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, 
request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutImageScanningConfiguration") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutImageScanningConfigurationInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpPutImageTagMutability struct { -} - -func (*awsAwsjson11_serializeOpPutImageTagMutability) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpPutImageTagMutability) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutImageTagMutabilityInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutImageTagMutability") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutImageTagMutabilityInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpPutLifecyclePolicy struct { -} - -func (*awsAwsjson11_serializeOpPutLifecyclePolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpPutLifecyclePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, 
metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutLifecyclePolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutLifecyclePolicy") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutLifecyclePolicyInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpPutRegistryPolicy struct { -} - -func (*awsAwsjson11_serializeOpPutRegistryPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpPutRegistryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutRegistryPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutRegistryPolicy") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutRegistryPolicyInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if 
request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpPutRegistryScanningConfiguration struct { -} - -func (*awsAwsjson11_serializeOpPutRegistryScanningConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpPutRegistryScanningConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutRegistryScanningConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutRegistryScanningConfiguration") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutRegistryScanningConfigurationInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpPutReplicationConfiguration struct { -} - -func (*awsAwsjson11_serializeOpPutReplicationConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpPutReplicationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutReplicationConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += 
"/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutReplicationConfiguration") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutReplicationConfigurationInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpSetRepositoryPolicy struct { -} - -func (*awsAwsjson11_serializeOpSetRepositoryPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpSetRepositoryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*SetRepositoryPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.SetRepositoryPolicy") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentSetRepositoryPolicyInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpStartImageScan struct { -} - -func (*awsAwsjson11_serializeOpStartImageScan) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpStartImageScan) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err 
error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*StartImageScanInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.StartImageScan") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentStartImageScanInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpStartLifecyclePolicyPreview struct { -} - -func (*awsAwsjson11_serializeOpStartLifecyclePolicyPreview) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpStartLifecyclePolicyPreview) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*StartLifecyclePolicyPreviewInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.StartLifecyclePolicyPreview") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentStartLifecyclePolicyPreviewInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = 
request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpTagResource struct { -} - -func (*awsAwsjson11_serializeOpTagResource) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*TagResourceInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.TagResource") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpUntagResource struct { -} - -func (*awsAwsjson11_serializeOpUntagResource) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*UntagResourceInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = 
"POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.UntagResource") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpUpdatePullThroughCacheRule struct { -} - -func (*awsAwsjson11_serializeOpUpdatePullThroughCacheRule) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpUpdatePullThroughCacheRule) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*UpdatePullThroughCacheRuleInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.UpdatePullThroughCacheRule") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentUpdatePullThroughCacheRuleInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpUploadLayerPart struct { -} - -func (*awsAwsjson11_serializeOpUploadLayerPart) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpUploadLayerPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, 
ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*UploadLayerPartInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.UploadLayerPart") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentUploadLayerPartInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpValidatePullThroughCacheRule struct { -} - -func (*awsAwsjson11_serializeOpValidatePullThroughCacheRule) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpValidatePullThroughCacheRule) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ValidatePullThroughCacheRuleInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.ValidatePullThroughCacheRule") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentValidatePullThroughCacheRuleInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = 
request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} -func awsAwsjson11_serializeDocumentBatchedOperationLayerDigestList(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsAwsjson11_serializeDocumentDescribeImagesFilter(v *types.DescribeImagesFilter, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if len(v.TagStatus) > 0 { - ok := object.Key("tagStatus") - ok.String(string(v.TagStatus)) - } - - return nil -} - -func awsAwsjson11_serializeDocumentEncryptionConfiguration(v *types.EncryptionConfiguration, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if len(v.EncryptionType) > 0 { - ok := object.Key("encryptionType") - ok.String(string(v.EncryptionType)) - } - - if v.KmsKey != nil { - ok := object.Key("kmsKey") - ok.String(*v.KmsKey) - } - - return nil -} - -func awsAwsjson11_serializeDocumentGetAuthorizationTokenRegistryIdList(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsAwsjson11_serializeDocumentImageIdentifier(v *types.ImageIdentifier, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ImageDigest != nil { - ok := object.Key("imageDigest") - ok.String(*v.ImageDigest) - } - - if v.ImageTag != nil { - ok := object.Key("imageTag") - ok.String(*v.ImageTag) - } - - return nil -} - -func awsAwsjson11_serializeDocumentImageIdentifierList(v []types.ImageIdentifier, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - if err := awsAwsjson11_serializeDocumentImageIdentifier(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsjson11_serializeDocumentImageScanningConfiguration(v *types.ImageScanningConfiguration, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ScanOnPush { - ok := object.Key("scanOnPush") - ok.Boolean(v.ScanOnPush) - } - - return nil -} - -func awsAwsjson11_serializeDocumentLayerDigestList(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsAwsjson11_serializeDocumentLifecyclePolicyPreviewFilter(v *types.LifecyclePolicyPreviewFilter, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if len(v.TagStatus) > 0 { - ok := object.Key("tagStatus") - ok.String(string(v.TagStatus)) - } - - return nil -} - -func awsAwsjson11_serializeDocumentListImagesFilter(v *types.ListImagesFilter, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if len(v.TagStatus) > 0 { - ok := object.Key("tagStatus") - ok.String(string(v.TagStatus)) - } - - return nil -} - -func awsAwsjson11_serializeDocumentMediaTypeList(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func 
awsAwsjson11_serializeDocumentPullThroughCacheRuleRepositoryPrefixList(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsAwsjson11_serializeDocumentRegistryScanningRule(v *types.RegistryScanningRule, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.RepositoryFilters != nil { - ok := object.Key("repositoryFilters") - if err := awsAwsjson11_serializeDocumentScanningRepositoryFilterList(v.RepositoryFilters, ok); err != nil { - return err - } - } - - if len(v.ScanFrequency) > 0 { - ok := object.Key("scanFrequency") - ok.String(string(v.ScanFrequency)) - } - - return nil -} - -func awsAwsjson11_serializeDocumentRegistryScanningRuleList(v []types.RegistryScanningRule, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - if err := awsAwsjson11_serializeDocumentRegistryScanningRule(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsjson11_serializeDocumentReplicationConfiguration(v *types.ReplicationConfiguration, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Rules != nil { - ok := object.Key("rules") - if err := awsAwsjson11_serializeDocumentReplicationRuleList(v.Rules, ok); err != nil { - return err - } - } - - return nil -} - -func awsAwsjson11_serializeDocumentReplicationDestination(v *types.ReplicationDestination, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Region != nil { - ok := object.Key("region") - ok.String(*v.Region) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - return nil -} - -func awsAwsjson11_serializeDocumentReplicationDestinationList(v []types.ReplicationDestination, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - if err := awsAwsjson11_serializeDocumentReplicationDestination(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsjson11_serializeDocumentReplicationRule(v *types.ReplicationRule, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Destinations != nil { - ok := object.Key("destinations") - if err := awsAwsjson11_serializeDocumentReplicationDestinationList(v.Destinations, ok); err != nil { - return err - } - } - - if v.RepositoryFilters != nil { - ok := object.Key("repositoryFilters") - if err := awsAwsjson11_serializeDocumentRepositoryFilterList(v.RepositoryFilters, ok); err != nil { - return err - } - } - - return nil -} - -func awsAwsjson11_serializeDocumentReplicationRuleList(v []types.ReplicationRule, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - if err := awsAwsjson11_serializeDocumentReplicationRule(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsjson11_serializeDocumentRepositoryFilter(v *types.RepositoryFilter, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Filter != nil { - ok := object.Key("filter") - ok.String(*v.Filter) - } - - if len(v.FilterType) > 0 { - ok := object.Key("filterType") - ok.String(string(v.FilterType)) - } - - return nil -} - -func awsAwsjson11_serializeDocumentRepositoryFilterList(v []types.RepositoryFilter, value smithyjson.Value) error 
{ - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - if err := awsAwsjson11_serializeDocumentRepositoryFilter(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsjson11_serializeDocumentRepositoryNameList(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsAwsjson11_serializeDocumentScanningConfigurationRepositoryNameList(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsAwsjson11_serializeDocumentScanningRepositoryFilter(v *types.ScanningRepositoryFilter, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Filter != nil { - ok := object.Key("filter") - ok.String(*v.Filter) - } - - if len(v.FilterType) > 0 { - ok := object.Key("filterType") - ok.String(string(v.FilterType)) - } - - return nil -} - -func awsAwsjson11_serializeDocumentScanningRepositoryFilterList(v []types.ScanningRepositoryFilter, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - if err := awsAwsjson11_serializeDocumentScanningRepositoryFilter(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Key != nil { - ok := object.Key("Key") - ok.String(*v.Key) - } - - if v.Value != nil { - ok := object.Key("Value") - ok.String(*v.Value) - } - - return nil -} - -func awsAwsjson11_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsAwsjson11_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - if err := awsAwsjson11_serializeDocumentTag(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsjson11_serializeOpDocumentBatchCheckLayerAvailabilityInput(v *BatchCheckLayerAvailabilityInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.LayerDigests != nil { - ok := object.Key("layerDigests") - if err := awsAwsjson11_serializeDocumentBatchedOperationLayerDigestList(v.LayerDigests, ok); err != nil { - return err - } - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentBatchDeleteImageInput(v *BatchDeleteImageInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ImageIds != nil { - ok := object.Key("imageIds") - if err := awsAwsjson11_serializeDocumentImageIdentifierList(v.ImageIds, ok); err != nil { - return err - } - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentBatchGetImageInput(v *BatchGetImageInput, value smithyjson.Value) error { - object := 
value.Object() - defer object.Close() - - if v.AcceptedMediaTypes != nil { - ok := object.Key("acceptedMediaTypes") - if err := awsAwsjson11_serializeDocumentMediaTypeList(v.AcceptedMediaTypes, ok); err != nil { - return err - } - } - - if v.ImageIds != nil { - ok := object.Key("imageIds") - if err := awsAwsjson11_serializeDocumentImageIdentifierList(v.ImageIds, ok); err != nil { - return err - } - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentBatchGetRepositoryScanningConfigurationInput(v *BatchGetRepositoryScanningConfigurationInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.RepositoryNames != nil { - ok := object.Key("repositoryNames") - if err := awsAwsjson11_serializeDocumentScanningConfigurationRepositoryNameList(v.RepositoryNames, ok); err != nil { - return err - } - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentCompleteLayerUploadInput(v *CompleteLayerUploadInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.LayerDigests != nil { - ok := object.Key("layerDigests") - if err := awsAwsjson11_serializeDocumentLayerDigestList(v.LayerDigests, ok); err != nil { - return err - } - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - if v.UploadId != nil { - ok := object.Key("uploadId") - ok.String(*v.UploadId) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentCreatePullThroughCacheRuleInput(v *CreatePullThroughCacheRuleInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.CredentialArn != nil { - ok := object.Key("credentialArn") - ok.String(*v.CredentialArn) - } - - if v.EcrRepositoryPrefix != nil { - ok := object.Key("ecrRepositoryPrefix") - ok.String(*v.EcrRepositoryPrefix) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if len(v.UpstreamRegistry) > 0 { - ok := object.Key("upstreamRegistry") - ok.String(string(v.UpstreamRegistry)) - } - - if v.UpstreamRegistryUrl != nil { - ok := object.Key("upstreamRegistryUrl") - ok.String(*v.UpstreamRegistryUrl) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentCreateRepositoryInput(v *CreateRepositoryInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.EncryptionConfiguration != nil { - ok := object.Key("encryptionConfiguration") - if err := awsAwsjson11_serializeDocumentEncryptionConfiguration(v.EncryptionConfiguration, ok); err != nil { - return err - } - } - - if v.ImageScanningConfiguration != nil { - ok := object.Key("imageScanningConfiguration") - if err := awsAwsjson11_serializeDocumentImageScanningConfiguration(v.ImageScanningConfiguration, ok); err != nil { - return err - } - } - - if len(v.ImageTagMutability) > 0 { - ok := object.Key("imageTagMutability") - ok.String(string(v.ImageTagMutability)) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - if v.Tags != nil { - ok := object.Key("tags") - if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != 
nil { - return err - } - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentDeleteLifecyclePolicyInput(v *DeleteLifecyclePolicyInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentDeletePullThroughCacheRuleInput(v *DeletePullThroughCacheRuleInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.EcrRepositoryPrefix != nil { - ok := object.Key("ecrRepositoryPrefix") - ok.String(*v.EcrRepositoryPrefix) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentDeleteRegistryPolicyInput(v *DeleteRegistryPolicyInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - return nil -} - -func awsAwsjson11_serializeOpDocumentDeleteRepositoryInput(v *DeleteRepositoryInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Force { - ok := object.Key("force") - ok.Boolean(v.Force) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentDeleteRepositoryPolicyInput(v *DeleteRepositoryPolicyInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentDescribeImageReplicationStatusInput(v *DescribeImageReplicationStatusInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ImageId != nil { - ok := object.Key("imageId") - if err := awsAwsjson11_serializeDocumentImageIdentifier(v.ImageId, ok); err != nil { - return err - } - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentDescribeImageScanFindingsInput(v *DescribeImageScanFindingsInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ImageId != nil { - ok := object.Key("imageId") - if err := awsAwsjson11_serializeDocumentImageIdentifier(v.ImageId, ok); err != nil { - return err - } - } - - if v.MaxResults != nil { - ok := object.Key("maxResults") - ok.Integer(*v.MaxResults) - } - - if v.NextToken != nil { - ok := object.Key("nextToken") - ok.String(*v.NextToken) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentDescribeImagesInput(v *DescribeImagesInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Filter != nil { - ok := object.Key("filter") - if err := awsAwsjson11_serializeDocumentDescribeImagesFilter(v.Filter, ok); err != nil { 
- return err - } - } - - if v.ImageIds != nil { - ok := object.Key("imageIds") - if err := awsAwsjson11_serializeDocumentImageIdentifierList(v.ImageIds, ok); err != nil { - return err - } - } - - if v.MaxResults != nil { - ok := object.Key("maxResults") - ok.Integer(*v.MaxResults) - } - - if v.NextToken != nil { - ok := object.Key("nextToken") - ok.String(*v.NextToken) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentDescribePullThroughCacheRulesInput(v *DescribePullThroughCacheRulesInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.EcrRepositoryPrefixes != nil { - ok := object.Key("ecrRepositoryPrefixes") - if err := awsAwsjson11_serializeDocumentPullThroughCacheRuleRepositoryPrefixList(v.EcrRepositoryPrefixes, ok); err != nil { - return err - } - } - - if v.MaxResults != nil { - ok := object.Key("maxResults") - ok.Integer(*v.MaxResults) - } - - if v.NextToken != nil { - ok := object.Key("nextToken") - ok.String(*v.NextToken) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentDescribeRegistryInput(v *DescribeRegistryInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - return nil -} - -func awsAwsjson11_serializeOpDocumentDescribeRepositoriesInput(v *DescribeRepositoriesInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.MaxResults != nil { - ok := object.Key("maxResults") - ok.Integer(*v.MaxResults) - } - - if v.NextToken != nil { - ok := object.Key("nextToken") - ok.String(*v.NextToken) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryNames != nil { - ok := object.Key("repositoryNames") - if err := awsAwsjson11_serializeDocumentRepositoryNameList(v.RepositoryNames, ok); err != nil { - return err - } - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentGetAuthorizationTokenInput(v *GetAuthorizationTokenInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.RegistryIds != nil { - ok := object.Key("registryIds") - if err := awsAwsjson11_serializeDocumentGetAuthorizationTokenRegistryIdList(v.RegistryIds, ok); err != nil { - return err - } - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentGetDownloadUrlForLayerInput(v *GetDownloadUrlForLayerInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.LayerDigest != nil { - ok := object.Key("layerDigest") - ok.String(*v.LayerDigest) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentGetLifecyclePolicyInput(v *GetLifecyclePolicyInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentGetLifecyclePolicyPreviewInput(v *GetLifecyclePolicyPreviewInput, 
value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Filter != nil { - ok := object.Key("filter") - if err := awsAwsjson11_serializeDocumentLifecyclePolicyPreviewFilter(v.Filter, ok); err != nil { - return err - } - } - - if v.ImageIds != nil { - ok := object.Key("imageIds") - if err := awsAwsjson11_serializeDocumentImageIdentifierList(v.ImageIds, ok); err != nil { - return err - } - } - - if v.MaxResults != nil { - ok := object.Key("maxResults") - ok.Integer(*v.MaxResults) - } - - if v.NextToken != nil { - ok := object.Key("nextToken") - ok.String(*v.NextToken) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentGetRegistryPolicyInput(v *GetRegistryPolicyInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - return nil -} - -func awsAwsjson11_serializeOpDocumentGetRegistryScanningConfigurationInput(v *GetRegistryScanningConfigurationInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - return nil -} - -func awsAwsjson11_serializeOpDocumentGetRepositoryPolicyInput(v *GetRepositoryPolicyInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentInitiateLayerUploadInput(v *InitiateLayerUploadInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentListImagesInput(v *ListImagesInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Filter != nil { - ok := object.Key("filter") - if err := awsAwsjson11_serializeDocumentListImagesFilter(v.Filter, ok); err != nil { - return err - } - } - - if v.MaxResults != nil { - ok := object.Key("maxResults") - ok.Integer(*v.MaxResults) - } - - if v.NextToken != nil { - ok := object.Key("nextToken") - ok.String(*v.NextToken) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentListTagsForResourceInput(v *ListTagsForResourceInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ResourceArn != nil { - ok := object.Key("resourceArn") - ok.String(*v.ResourceArn) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentPutImageInput(v *PutImageInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ImageDigest != nil { - ok := object.Key("imageDigest") - ok.String(*v.ImageDigest) - } - - if v.ImageManifest != nil { - ok := object.Key("imageManifest") - ok.String(*v.ImageManifest) - } - - if v.ImageManifestMediaType != nil { - ok := object.Key("imageManifestMediaType") - ok.String(*v.ImageManifestMediaType) - } - - if v.ImageTag != nil { - ok := 
object.Key("imageTag") - ok.String(*v.ImageTag) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentPutImageScanningConfigurationInput(v *PutImageScanningConfigurationInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ImageScanningConfiguration != nil { - ok := object.Key("imageScanningConfiguration") - if err := awsAwsjson11_serializeDocumentImageScanningConfiguration(v.ImageScanningConfiguration, ok); err != nil { - return err - } - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentPutImageTagMutabilityInput(v *PutImageTagMutabilityInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if len(v.ImageTagMutability) > 0 { - ok := object.Key("imageTagMutability") - ok.String(string(v.ImageTagMutability)) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentPutLifecyclePolicyInput(v *PutLifecyclePolicyInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.LifecyclePolicyText != nil { - ok := object.Key("lifecyclePolicyText") - ok.String(*v.LifecyclePolicyText) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentPutRegistryPolicyInput(v *PutRegistryPolicyInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.PolicyText != nil { - ok := object.Key("policyText") - ok.String(*v.PolicyText) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentPutRegistryScanningConfigurationInput(v *PutRegistryScanningConfigurationInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Rules != nil { - ok := object.Key("rules") - if err := awsAwsjson11_serializeDocumentRegistryScanningRuleList(v.Rules, ok); err != nil { - return err - } - } - - if len(v.ScanType) > 0 { - ok := object.Key("scanType") - ok.String(string(v.ScanType)) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentPutReplicationConfigurationInput(v *PutReplicationConfigurationInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ReplicationConfiguration != nil { - ok := object.Key("replicationConfiguration") - if err := awsAwsjson11_serializeDocumentReplicationConfiguration(v.ReplicationConfiguration, ok); err != nil { - return err - } - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentSetRepositoryPolicyInput(v *SetRepositoryPolicyInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Force { - ok := object.Key("force") - ok.Boolean(v.Force) - } - - if v.PolicyText != nil { - ok := object.Key("policyText") - ok.String(*v.PolicyText) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - 
ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentStartImageScanInput(v *StartImageScanInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ImageId != nil { - ok := object.Key("imageId") - if err := awsAwsjson11_serializeDocumentImageIdentifier(v.ImageId, ok); err != nil { - return err - } - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentStartLifecyclePolicyPreviewInput(v *StartLifecyclePolicyPreviewInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.LifecyclePolicyText != nil { - ok := object.Key("lifecyclePolicyText") - ok.String(*v.LifecyclePolicyText) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ResourceArn != nil { - ok := object.Key("resourceArn") - ok.String(*v.ResourceArn) - } - - if v.Tags != nil { - ok := object.Key("tags") - if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { - return err - } - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ResourceArn != nil { - ok := object.Key("resourceArn") - ok.String(*v.ResourceArn) - } - - if v.TagKeys != nil { - ok := object.Key("tagKeys") - if err := awsAwsjson11_serializeDocumentTagKeyList(v.TagKeys, ok); err != nil { - return err - } - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentUpdatePullThroughCacheRuleInput(v *UpdatePullThroughCacheRuleInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.CredentialArn != nil { - ok := object.Key("credentialArn") - ok.String(*v.CredentialArn) - } - - if v.EcrRepositoryPrefix != nil { - ok := object.Key("ecrRepositoryPrefix") - ok.String(*v.EcrRepositoryPrefix) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentUploadLayerPartInput(v *UploadLayerPartInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.LayerPartBlob != nil { - ok := object.Key("layerPartBlob") - ok.Base64EncodeBytes(v.LayerPartBlob) - } - - if v.PartFirstByte != nil { - ok := object.Key("partFirstByte") - ok.Long(*v.PartFirstByte) - } - - if v.PartLastByte != nil { - ok := object.Key("partLastByte") - ok.Long(*v.PartLastByte) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - if v.RepositoryName != nil { - ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - if v.UploadId != nil { - ok := object.Key("uploadId") - ok.String(*v.UploadId) - } - - return nil -} - -func awsAwsjson11_serializeOpDocumentValidatePullThroughCacheRuleInput(v *ValidatePullThroughCacheRuleInput, value smithyjson.Value) error { - object := 
value.Object() - defer object.Close() - - if v.EcrRepositoryPrefix != nil { - ok := object.Key("ecrRepositoryPrefix") - ok.String(*v.EcrRepositoryPrefix) - } - - if v.RegistryId != nil { - ok := object.Key("registryId") - ok.String(*v.RegistryId) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go deleted file mode 100644 index 1ff5684677..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go +++ /dev/null @@ -1,358 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -type EncryptionType string - -// Enum values for EncryptionType -const ( - EncryptionTypeAes256 EncryptionType = "AES256" - EncryptionTypeKms EncryptionType = "KMS" -) - -// Values returns all known values for EncryptionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (EncryptionType) Values() []EncryptionType { - return []EncryptionType{ - "AES256", - "KMS", - } -} - -type FindingSeverity string - -// Enum values for FindingSeverity -const ( - FindingSeverityInformational FindingSeverity = "INFORMATIONAL" - FindingSeverityLow FindingSeverity = "LOW" - FindingSeverityMedium FindingSeverity = "MEDIUM" - FindingSeverityHigh FindingSeverity = "HIGH" - FindingSeverityCritical FindingSeverity = "CRITICAL" - FindingSeverityUndefined FindingSeverity = "UNDEFINED" -) - -// Values returns all known values for FindingSeverity. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (FindingSeverity) Values() []FindingSeverity { - return []FindingSeverity{ - "INFORMATIONAL", - "LOW", - "MEDIUM", - "HIGH", - "CRITICAL", - "UNDEFINED", - } -} - -type ImageActionType string - -// Enum values for ImageActionType -const ( - ImageActionTypeExpire ImageActionType = "EXPIRE" -) - -// Values returns all known values for ImageActionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (ImageActionType) Values() []ImageActionType { - return []ImageActionType{ - "EXPIRE", - } -} - -type ImageFailureCode string - -// Enum values for ImageFailureCode -const ( - ImageFailureCodeInvalidImageDigest ImageFailureCode = "InvalidImageDigest" - ImageFailureCodeInvalidImageTag ImageFailureCode = "InvalidImageTag" - ImageFailureCodeImageTagDoesNotMatchDigest ImageFailureCode = "ImageTagDoesNotMatchDigest" - ImageFailureCodeImageNotFound ImageFailureCode = "ImageNotFound" - ImageFailureCodeMissingDigestAndTag ImageFailureCode = "MissingDigestAndTag" - ImageFailureCodeImageReferencedByManifestList ImageFailureCode = "ImageReferencedByManifestList" - ImageFailureCodeKmsError ImageFailureCode = "KmsError" - ImageFailureCodeUpstreamAccessDenied ImageFailureCode = "UpstreamAccessDenied" - ImageFailureCodeUpstreamTooManyRequests ImageFailureCode = "UpstreamTooManyRequests" - ImageFailureCodeUpstreamUnavailable ImageFailureCode = "UpstreamUnavailable" -) - -// Values returns all known values for ImageFailureCode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates.
-func (ImageFailureCode) Values() []ImageFailureCode { - return []ImageFailureCode{ - "InvalidImageDigest", - "InvalidImageTag", - "ImageTagDoesNotMatchDigest", - "ImageNotFound", - "MissingDigestAndTag", - "ImageReferencedByManifestList", - "KmsError", - "UpstreamAccessDenied", - "UpstreamTooManyRequests", - "UpstreamUnavailable", - } -} - -type ImageTagMutability string - -// Enum values for ImageTagMutability -const ( - ImageTagMutabilityMutable ImageTagMutability = "MUTABLE" - ImageTagMutabilityImmutable ImageTagMutability = "IMMUTABLE" -) - -// Values returns all known values for ImageTagMutability. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (ImageTagMutability) Values() []ImageTagMutability { - return []ImageTagMutability{ - "MUTABLE", - "IMMUTABLE", - } -} - -type LayerAvailability string - -// Enum values for LayerAvailability -const ( - LayerAvailabilityAvailable LayerAvailability = "AVAILABLE" - LayerAvailabilityUnavailable LayerAvailability = "UNAVAILABLE" -) - -// Values returns all known values for LayerAvailability. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (LayerAvailability) Values() []LayerAvailability { - return []LayerAvailability{ - "AVAILABLE", - "UNAVAILABLE", - } -} - -type LayerFailureCode string - -// Enum values for LayerFailureCode -const ( - LayerFailureCodeInvalidLayerDigest LayerFailureCode = "InvalidLayerDigest" - LayerFailureCodeMissingLayerDigest LayerFailureCode = "MissingLayerDigest" -) - -// Values returns all known values for LayerFailureCode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (LayerFailureCode) Values() []LayerFailureCode { - return []LayerFailureCode{ - "InvalidLayerDigest", - "MissingLayerDigest", - } -} - -type LifecyclePolicyPreviewStatus string - -// Enum values for LifecyclePolicyPreviewStatus -const ( - LifecyclePolicyPreviewStatusInProgress LifecyclePolicyPreviewStatus = "IN_PROGRESS" - LifecyclePolicyPreviewStatusComplete LifecyclePolicyPreviewStatus = "COMPLETE" - LifecyclePolicyPreviewStatusExpired LifecyclePolicyPreviewStatus = "EXPIRED" - LifecyclePolicyPreviewStatusFailed LifecyclePolicyPreviewStatus = "FAILED" -) - -// Values returns all known values for LifecyclePolicyPreviewStatus. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. -func (LifecyclePolicyPreviewStatus) Values() []LifecyclePolicyPreviewStatus { - return []LifecyclePolicyPreviewStatus{ - "IN_PROGRESS", - "COMPLETE", - "EXPIRED", - "FAILED", - } -} - -type ReplicationStatus string - -// Enum values for ReplicationStatus -const ( - ReplicationStatusInProgress ReplicationStatus = "IN_PROGRESS" - ReplicationStatusComplete ReplicationStatus = "COMPLETE" - ReplicationStatusFailed ReplicationStatus = "FAILED" -) - -// Values returns all known values for ReplicationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. 
-func (ReplicationStatus) Values() []ReplicationStatus { - return []ReplicationStatus{ - "IN_PROGRESS", - "COMPLETE", - "FAILED", - } -} - -type RepositoryFilterType string - -// Enum values for RepositoryFilterType -const ( - RepositoryFilterTypePrefixMatch RepositoryFilterType = "PREFIX_MATCH" -) - -// Values returns all known values for RepositoryFilterType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (RepositoryFilterType) Values() []RepositoryFilterType { - return []RepositoryFilterType{ - "PREFIX_MATCH", - } -} - -type ScanFrequency string - -// Enum values for ScanFrequency -const ( - ScanFrequencyScanOnPush ScanFrequency = "SCAN_ON_PUSH" - ScanFrequencyContinuousScan ScanFrequency = "CONTINUOUS_SCAN" - ScanFrequencyManual ScanFrequency = "MANUAL" -) - -// Values returns all known values for ScanFrequency. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (ScanFrequency) Values() []ScanFrequency { - return []ScanFrequency{ - "SCAN_ON_PUSH", - "CONTINUOUS_SCAN", - "MANUAL", - } -} - -type ScanningConfigurationFailureCode string - -// Enum values for ScanningConfigurationFailureCode -const ( - ScanningConfigurationFailureCodeRepositoryNotFound ScanningConfigurationFailureCode = "REPOSITORY_NOT_FOUND" -) - -// Values returns all known values for ScanningConfigurationFailureCode. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. -func (ScanningConfigurationFailureCode) Values() []ScanningConfigurationFailureCode { - return []ScanningConfigurationFailureCode{ - "REPOSITORY_NOT_FOUND", - } -} - -type ScanningRepositoryFilterType string - -// Enum values for ScanningRepositoryFilterType -const ( - ScanningRepositoryFilterTypeWildcard ScanningRepositoryFilterType = "WILDCARD" -) - -// Values returns all known values for ScanningRepositoryFilterType. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. -func (ScanningRepositoryFilterType) Values() []ScanningRepositoryFilterType { - return []ScanningRepositoryFilterType{ - "WILDCARD", - } -} - -type ScanStatus string - -// Enum values for ScanStatus -const ( - ScanStatusInProgress ScanStatus = "IN_PROGRESS" - ScanStatusComplete ScanStatus = "COMPLETE" - ScanStatusFailed ScanStatus = "FAILED" - ScanStatusUnsupportedImage ScanStatus = "UNSUPPORTED_IMAGE" - ScanStatusActive ScanStatus = "ACTIVE" - ScanStatusPending ScanStatus = "PENDING" - ScanStatusScanEligibilityExpired ScanStatus = "SCAN_ELIGIBILITY_EXPIRED" - ScanStatusFindingsUnavailable ScanStatus = "FINDINGS_UNAVAILABLE" -) - -// Values returns all known values for ScanStatus. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. 
-func (ScanStatus) Values() []ScanStatus { - return []ScanStatus{ - "IN_PROGRESS", - "COMPLETE", - "FAILED", - "UNSUPPORTED_IMAGE", - "ACTIVE", - "PENDING", - "SCAN_ELIGIBILITY_EXPIRED", - "FINDINGS_UNAVAILABLE", - } -} - -type ScanType string - -// Enum values for ScanType -const ( - ScanTypeBasic ScanType = "BASIC" - ScanTypeEnhanced ScanType = "ENHANCED" -) - -// Values returns all known values for ScanType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. -func (ScanType) Values() []ScanType { - return []ScanType{ - "BASIC", - "ENHANCED", - } -} - -type TagStatus string - -// Enum values for TagStatus -const ( - TagStatusTagged TagStatus = "TAGGED" - TagStatusUntagged TagStatus = "UNTAGGED" - TagStatusAny TagStatus = "ANY" -) - -// Values returns all known values for TagStatus. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. -func (TagStatus) Values() []TagStatus { - return []TagStatus{ - "TAGGED", - "UNTAGGED", - "ANY", - } -} - -type UpstreamRegistry string - -// Enum values for UpstreamRegistry -const ( - UpstreamRegistryEcrPublic UpstreamRegistry = "ecr-public" - UpstreamRegistryQuay UpstreamRegistry = "quay" - UpstreamRegistryK8s UpstreamRegistry = "k8s" - UpstreamRegistryDockerHub UpstreamRegistry = "docker-hub" - UpstreamRegistryGitHubContainerRegistry UpstreamRegistry = "github-container-registry" - UpstreamRegistryAzureContainerRegistry UpstreamRegistry = "azure-container-registry" -) - -// Values returns all known values for UpstreamRegistry. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (UpstreamRegistry) Values() []UpstreamRegistry { - return []UpstreamRegistry{ - "ecr-public", - "quay", - "k8s", - "docker-hub", - "github-container-registry", - "azure-container-registry", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go deleted file mode 100644 index 01365bf0bf..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go +++ /dev/null @@ -1,1043 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// The specified layer upload does not contain any layer parts. -type EmptyUploadException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *EmptyUploadException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *EmptyUploadException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *EmptyUploadException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "EmptyUploadException" - } - return *e.ErrorCodeOverride -} -func (e *EmptyUploadException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified image has already been pushed, and there were no changes to the -// manifest or image tag after the last push. 
-type ImageAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ImageAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ImageAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ImageAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ImageAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *ImageAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified image digest does not match the digest that Amazon ECR calculated -// for the image. -type ImageDigestDoesNotMatchException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ImageDigestDoesNotMatchException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ImageDigestDoesNotMatchException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ImageDigestDoesNotMatchException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ImageDigestDoesNotMatchException" - } - return *e.ErrorCodeOverride -} -func (e *ImageDigestDoesNotMatchException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The image requested does not exist in the specified repository. -type ImageNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ImageNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ImageNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ImageNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ImageNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *ImageNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified image is tagged with a tag that already exists. The repository is -// configured for tag immutability. -type ImageTagAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ImageTagAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ImageTagAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ImageTagAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ImageTagAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *ImageTagAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The layer digest calculation performed by Amazon ECR upon receipt of the image -// layer does not match the digest specified. 
-type InvalidLayerException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidLayerException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidLayerException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidLayerException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidLayerException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidLayerException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The layer part size is not valid, or the first byte specified is not -// consecutive to the last byte of a previous layer part upload. -type InvalidLayerPartException struct { - Message *string - - ErrorCodeOverride *string - - RegistryId *string - RepositoryName *string - UploadId *string - LastValidByteReceived *int64 - - noSmithyDocumentSerde -} - -func (e *InvalidLayerPartException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidLayerPartException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidLayerPartException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidLayerPartException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidLayerPartException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified parameter is invalid. Review the available parameters for the API -// request. -type InvalidParameterException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidParameterException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidParameterException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidParameterException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidParameterException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// An invalid parameter has been specified. Tag keys can have a maximum character -// length of 128 characters, and tag values can have a maximum length of 256 -// characters. -type InvalidTagParameterException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidTagParameterException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidTagParameterException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidTagParameterException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidTagParameterException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidTagParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The operation failed due to a KMS exception. 
-type KmsException struct { - Message *string - - ErrorCodeOverride *string - - KmsError *string - - noSmithyDocumentSerde -} - -func (e *KmsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *KmsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *KmsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "KmsException" - } - return *e.ErrorCodeOverride -} -func (e *KmsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The image layer already exists in the associated repository. -type LayerAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LayerAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LayerAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LayerAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LayerAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *LayerAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified layer is not available because it is not associated with an -// image. Unassociated image layers may be cleaned up at any time. -type LayerInaccessibleException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LayerInaccessibleException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LayerInaccessibleException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LayerInaccessibleException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LayerInaccessibleException" - } - return *e.ErrorCodeOverride -} -func (e *LayerInaccessibleException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Layer parts must be at least 5 MiB in size. -type LayerPartTooSmallException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LayerPartTooSmallException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LayerPartTooSmallException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LayerPartTooSmallException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LayerPartTooSmallException" - } - return *e.ErrorCodeOverride -} -func (e *LayerPartTooSmallException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified layers could not be found, or the specified layer is not valid -// for this repository. 
-type LayersNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LayersNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LayersNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LayersNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LayersNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *LayersNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The lifecycle policy could not be found, and no policy is set to the repository. -type LifecyclePolicyNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LifecyclePolicyNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LifecyclePolicyNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LifecyclePolicyNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LifecyclePolicyNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *LifecyclePolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The previous lifecycle policy preview request has not completed. Wait and try -// again. -type LifecyclePolicyPreviewInProgressException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LifecyclePolicyPreviewInProgressException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LifecyclePolicyPreviewInProgressException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LifecyclePolicyPreviewInProgressException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LifecyclePolicyPreviewInProgressException" - } - return *e.ErrorCodeOverride -} -func (e *LifecyclePolicyPreviewInProgressException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// There is no dry run for this repository. -type LifecyclePolicyPreviewNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LifecyclePolicyPreviewNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LifecyclePolicyPreviewNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LifecyclePolicyPreviewNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LifecyclePolicyPreviewNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *LifecyclePolicyPreviewNotFoundException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The operation did not succeed because it would have exceeded a service limit -// for your account. For more information, see Amazon ECR service quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) -// in the Amazon Elastic Container Registry User Guide. 
-type LimitExceededException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LimitExceededException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LimitExceededException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LimitExceededException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LimitExceededException" - } - return *e.ErrorCodeOverride -} -func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// A pull through cache rule with these settings already exists for the private -// registry. -type PullThroughCacheRuleAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *PullThroughCacheRuleAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *PullThroughCacheRuleAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *PullThroughCacheRuleAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "PullThroughCacheRuleAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *PullThroughCacheRuleAlreadyExistsException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The pull through cache rule was not found. Specify a valid pull through cache -// rule and try again. -type PullThroughCacheRuleNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *PullThroughCacheRuleNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *PullThroughCacheRuleNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *PullThroughCacheRuleNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "PullThroughCacheRuleNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *PullThroughCacheRuleNotFoundException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The manifest list is referencing an image that does not exist. -type ReferencedImagesNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ReferencedImagesNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ReferencedImagesNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ReferencedImagesNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ReferencedImagesNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *ReferencedImagesNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The registry doesn't have an associated registry policy. 
-type RegistryPolicyNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RegistryPolicyNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RegistryPolicyNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RegistryPolicyNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RegistryPolicyNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *RegistryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified repository already exists in the specified registry. -type RepositoryAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified repository contains images. To delete a repository that contains -// images, you must force the deletion with the force parameter. -type RepositoryNotEmptyException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryNotEmptyException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryNotEmptyException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryNotEmptyException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryNotEmptyException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryNotEmptyException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified repository could not be found. Check the spelling of the -// specified repository and ensure that you are performing operations on the -// correct registry. -type RepositoryNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified repository and registry combination does not have an associated -// repository policy. 
-type RepositoryPolicyNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryPolicyNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryPolicyNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryPolicyNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryPolicyNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified image scan could not be found. Ensure that image scanning is -// enabled on the repository and try again. -type ScanNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ScanNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ScanNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ScanNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ScanNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *ScanNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The ARN of the secret specified in the pull through cache rule was not found. -// Update the pull through cache rule with a valid secret ARN and try again. -type SecretNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *SecretNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *SecretNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *SecretNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "SecretNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *SecretNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// These errors are usually caused by a server-side issue. -type ServerException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ServerException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ServerException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ServerException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ServerException" - } - return *e.ErrorCodeOverride -} -func (e *ServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } - -// The list of tags on the repository is over the limit. The maximum number of -// tags that can be applied to a repository is 50. 
-type TooManyTagsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *TooManyTagsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *TooManyTagsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *TooManyTagsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "TooManyTagsException" - } - return *e.ErrorCodeOverride -} -func (e *TooManyTagsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The secret is unable to be accessed. Verify the resource permissions for the -// secret and try again. -type UnableToAccessSecretException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UnableToAccessSecretException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnableToAccessSecretException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnableToAccessSecretException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnableToAccessSecretException" - } - return *e.ErrorCodeOverride -} -func (e *UnableToAccessSecretException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The secret is accessible but is unable to be decrypted. Verify the resource -// permissions and try again. -type UnableToDecryptSecretValueException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UnableToDecryptSecretValueException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnableToDecryptSecretValueException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnableToDecryptSecretValueException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnableToDecryptSecretValueException" - } - return *e.ErrorCodeOverride -} -func (e *UnableToDecryptSecretValueException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The image or images were unable to be pulled using the pull through cache rule. -// This is usually caused because of an issue with the Secrets Manager secret -// containing the credentials for the upstream registry. -type UnableToGetUpstreamImageException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UnableToGetUpstreamImageException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnableToGetUpstreamImageException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnableToGetUpstreamImageException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnableToGetUpstreamImageException" - } - return *e.ErrorCodeOverride -} -func (e *UnableToGetUpstreamImageException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// There was an issue getting the upstream layer matching the pull through cache -// rule.
-type UnableToGetUpstreamLayerException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UnableToGetUpstreamLayerException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnableToGetUpstreamLayerException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnableToGetUpstreamLayerException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnableToGetUpstreamLayerException" - } - return *e.ErrorCodeOverride -} -func (e *UnableToGetUpstreamLayerException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The image is of a type that cannot be scanned. -type UnsupportedImageTypeException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UnsupportedImageTypeException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnsupportedImageTypeException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnsupportedImageTypeException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnsupportedImageTypeException" - } - return *e.ErrorCodeOverride -} -func (e *UnsupportedImageTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified upstream registry isn't supported. -type UnsupportedUpstreamRegistryException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UnsupportedUpstreamRegistryException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnsupportedUpstreamRegistryException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnsupportedUpstreamRegistryException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnsupportedUpstreamRegistryException" - } - return *e.ErrorCodeOverride -} -func (e *UnsupportedUpstreamRegistryException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The upload could not be found, or the specified upload ID is not valid for this -// repository. -type UploadNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UploadNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UploadNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UploadNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UploadNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *UploadNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// There was an exception validating this request. 
-type ValidationException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ValidationException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ValidationException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ValidationException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ValidationException" - } - return *e.ErrorCodeOverride -} -func (e *ValidationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go deleted file mode 100644 index e707591dfa..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go +++ /dev/null @@ -1,894 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" - "time" -) - -// This data type is used in the ImageScanFinding data type. -type Attribute struct { - - // The attribute key. - // - // This member is required. - Key *string - - // The value assigned to the attribute key. - Value *string - - noSmithyDocumentSerde -} - -// An object representing authorization data for an Amazon ECR registry. -type AuthorizationData struct { - - // A base64-encoded string that contains authorization data for the specified - // Amazon ECR registry. When the string is decoded, it is presented in the format - // user:password for private registry authentication using docker login . - AuthorizationToken *string - - // The Unix time in seconds and milliseconds when the authorization token expires. - // Authorization tokens are valid for 12 hours. - ExpiresAt *time.Time - - // The registry URL to use for this authorization token in a docker login command. - // The Amazon ECR registry URL format is - // https://aws_account_id.dkr.ecr.region.amazonaws.com . For example, - // https://012345678910.dkr.ecr.us-east-1.amazonaws.com . - ProxyEndpoint *string - - noSmithyDocumentSerde -} - -// The image details of the Amazon ECR container image. -type AwsEcrContainerImageDetails struct { - - // The architecture of the Amazon ECR container image. - Architecture *string - - // The image author of the Amazon ECR container image. - Author *string - - // The image hash of the Amazon ECR container image. - ImageHash *string - - // The image tags attached to the Amazon ECR container image. - ImageTags []string - - // The platform of the Amazon ECR container image. - Platform *string - - // The date and time the Amazon ECR container image was pushed. - PushedAt *time.Time - - // The registry the Amazon ECR container image belongs to. - Registry *string - - // The name of the repository the Amazon ECR container image resides in. - RepositoryName *string - - noSmithyDocumentSerde -} - -// The CVSS score for a finding. -type CvssScore struct { - - // The base CVSS score used for the finding. - BaseScore float64 - - // The vector string of the CVSS score. - ScoringVector *string - - // The source of the CVSS score. - Source *string - - // The version of CVSS used for the score. - Version *string - - noSmithyDocumentSerde -} - -// Details on adjustments Amazon Inspector made to the CVSS score for a finding. -type CvssScoreAdjustment struct { - - // The metric used to adjust the CVSS score.
- Metric *string - - // The reason the CVSS score has been adjusted. - Reason *string - - noSmithyDocumentSerde -} - -// Information about the CVSS score. -type CvssScoreDetails struct { - - // An object that contains details about adjustments Amazon Inspector made to the - // CVSS score. - Adjustments []CvssScoreAdjustment - - // The CVSS score. - Score float64 - - // The source for the CVSS score. - ScoreSource *string - - // The vector for the CVSS score. - ScoringVector *string - - // The CVSS version used in scoring. - Version *string - - noSmithyDocumentSerde -} - -// An object representing a filter on a DescribeImages operation. -type DescribeImagesFilter struct { - - // The tag status with which to filter your DescribeImages results. You can filter - // results based on whether they are TAGGED or UNTAGGED . - TagStatus TagStatus - - noSmithyDocumentSerde -} - -// The encryption configuration for the repository. This determines how the -// contents of your repository are encrypted at rest. By default, when no -// encryption configuration is set or the AES256 encryption type is used, Amazon -// ECR uses server-side encryption with Amazon S3-managed encryption keys which -// encrypts your data at rest using an AES-256 encryption algorithm. This does not -// require any action on your part. For more control over the encryption of the -// contents of your repository, you can use server-side encryption with Key -// Management Service key stored in Key Management Service (KMS) to encrypt your -// images. For more information, see Amazon ECR encryption at rest (https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) -// in the Amazon Elastic Container Registry User Guide. -type EncryptionConfiguration struct { - - // The encryption type to use. If you use the KMS encryption type, the contents of - // the repository will be encrypted using server-side encryption with Key - // Management Service key stored in KMS. When you use KMS to encrypt your data, you - // can either use the default Amazon Web Services managed KMS key for Amazon ECR, - // or specify your own KMS key, which you already created. For more information, - // see Protecting data using server-side encryption with an KMS key stored in Key - // Management Service (SSE-KMS) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) - // in the Amazon Simple Storage Service Console Developer Guide. If you use the - // AES256 encryption type, Amazon ECR uses server-side encryption with Amazon - // S3-managed encryption keys which encrypts the images in the repository using an - // AES-256 encryption algorithm. For more information, see Protecting data using - // server-side encryption with Amazon S3-managed encryption keys (SSE-S3) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) - // in the Amazon Simple Storage Service Console Developer Guide. - // - // This member is required. - EncryptionType EncryptionType - - // If you use the KMS encryption type, specify the KMS key to use for encryption. - // The alias, key ID, or full ARN of the KMS key can be specified. The key must - // exist in the same Region as the repository. If no key is specified, the default - // Amazon Web Services managed KMS key for Amazon ECR will be used. - KmsKey *string - - noSmithyDocumentSerde -} - -// The details of an enhanced image scan. This is returned when enhanced scanning -// is enabled for your private registry.
-type EnhancedImageScanFinding struct { - - // The Amazon Web Services account ID associated with the image. - AwsAccountId *string - - // The description of the finding. - Description *string - - // The Amazon Resource Number (ARN) of the finding. - FindingArn *string - - // The date and time that the finding was first observed. - FirstObservedAt *time.Time - - // The date and time that the finding was last observed. - LastObservedAt *time.Time - - // An object that contains the details of a package vulnerability finding. - PackageVulnerabilityDetails *PackageVulnerabilityDetails - - // An object that contains the details about how to remediate a finding. - Remediation *Remediation - - // Contains information on the resources involved in a finding. - Resources []Resource - - // The Amazon Inspector score given to the finding. - Score float64 - - // An object that contains details of the Amazon Inspector score. - ScoreDetails *ScoreDetails - - // The severity of the finding. - Severity *string - - // The status of the finding. - Status *string - - // The title of the finding. - Title *string - - // The type of the finding. - Type *string - - // The date and time the finding was last updated at. - UpdatedAt *time.Time - - noSmithyDocumentSerde -} - -// An object representing an Amazon ECR image. -type Image struct { - - // An object containing the image tag and image digest associated with an image. - ImageId *ImageIdentifier - - // The image manifest associated with the image. - ImageManifest *string - - // The manifest media type of the image. - ImageManifestMediaType *string - - // The Amazon Web Services account ID associated with the registry containing the - // image. - RegistryId *string - - // The name of the repository associated with the image. - RepositoryName *string - - noSmithyDocumentSerde -} - -// An object that describes an image returned by a DescribeImages operation. -type ImageDetail struct { - - // The artifact media type of the image. - ArtifactMediaType *string - - // The sha256 digest of the image manifest. - ImageDigest *string - - // The media type of the image manifest. - ImageManifestMediaType *string - - // The date and time, expressed in standard JavaScript date format, at which the - // current image was pushed to the repository. - ImagePushedAt *time.Time - - // A summary of the last completed image scan. - ImageScanFindingsSummary *ImageScanFindingsSummary - - // The current state of the scan. - ImageScanStatus *ImageScanStatus - - // The size, in bytes, of the image in the repository. If the image is a manifest - // list, this will be the max size of all manifests in the list. Beginning with - // Docker version 1.9, the Docker client compresses image layers before pushing - // them to a V2 Docker registry. The output of the docker images command shows the - // uncompressed image size, so it may return a larger image size than the image - // sizes returned by DescribeImages . - ImageSizeInBytes *int64 - - // The list of tags associated with this image. - ImageTags []string - - // The date and time, expressed in standard JavaScript date format, when Amazon - // ECR recorded the last image pull. Amazon ECR refreshes the last image pull - // timestamp at least once every 24 hours. For example, if you pull an image once a - // day then the lastRecordedPullTime timestamp will indicate the exact time that - // the image was last pulled. 
However, if you pull an image once an hour, because - // Amazon ECR refreshes the lastRecordedPullTime timestamp at least once every 24 - // hours, the result may not be the exact time that the image was last pulled. - LastRecordedPullTime *time.Time - - // The Amazon Web Services account ID associated with the registry to which this - // image belongs. - RegistryId *string - - // The name of the repository to which this image belongs. - RepositoryName *string - - noSmithyDocumentSerde -} - -// An object representing an Amazon ECR image failure. -type ImageFailure struct { - - // The code associated with the failure. - FailureCode ImageFailureCode - - // The reason for the failure. - FailureReason *string - - // The image ID associated with the failure. - ImageId *ImageIdentifier - - noSmithyDocumentSerde -} - -// An object with identifying information for an image in an Amazon ECR repository. -type ImageIdentifier struct { - - // The sha256 digest of the image manifest. - ImageDigest *string - - // The tag used for the image. - ImageTag *string - - noSmithyDocumentSerde -} - -// The status of the replication process for an image. -type ImageReplicationStatus struct { - - // The failure code for a replication that has failed. - FailureCode *string - - // The destination Region for the image replication. - Region *string - - // The Amazon Web Services account ID associated with the registry to which the - // image belongs. - RegistryId *string - - // The image replication status. - Status ReplicationStatus - - noSmithyDocumentSerde -} - -// Contains information about an image scan finding. -type ImageScanFinding struct { - - // A collection of attributes of the host from which the finding is generated. - Attributes []Attribute - - // The description of the finding. - Description *string - - // The name associated with the finding, usually a CVE number. - Name *string - - // The finding severity. - Severity FindingSeverity - - // A link containing additional details about the security vulnerability. - Uri *string - - noSmithyDocumentSerde -} - -// The details of an image scan. -type ImageScanFindings struct { - - // Details about the enhanced scan findings from Amazon Inspector. - EnhancedFindings []EnhancedImageScanFinding - - // The image vulnerability counts, sorted by severity. - FindingSeverityCounts map[string]int32 - - // The findings from the image scan. - Findings []ImageScanFinding - - // The time of the last completed image scan. - ImageScanCompletedAt *time.Time - - // The time when the vulnerability data was last scanned. - VulnerabilitySourceUpdatedAt *time.Time - - noSmithyDocumentSerde -} - -// A summary of the last completed image scan. -type ImageScanFindingsSummary struct { - - // The image vulnerability counts, sorted by severity. - FindingSeverityCounts map[string]int32 - - // The time of the last completed image scan. - ImageScanCompletedAt *time.Time - - // The time when the vulnerability data was last scanned. - VulnerabilitySourceUpdatedAt *time.Time - - noSmithyDocumentSerde -} - -// The image scanning configuration for a repository. -type ImageScanningConfiguration struct { - - // The setting that determines whether images are scanned after being pushed to a - // repository. If set to true , images will be scanned after being pushed. 
If this - // parameter is not specified, it will default to false and images will not be - // scanned unless a scan is manually started with the API_StartImageScan (https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_StartImageScan.html) - // API. - ScanOnPush bool - - noSmithyDocumentSerde -} - -// The current status of an image scan. -type ImageScanStatus struct { - - // The description of the image scan status. - Description *string - - // The current state of an image scan. - Status ScanStatus - - noSmithyDocumentSerde -} - -// An object representing an Amazon ECR image layer. -type Layer struct { - - // The availability status of the image layer. - LayerAvailability LayerAvailability - - // The sha256 digest of the image layer. - LayerDigest *string - - // The size, in bytes, of the image layer. - LayerSize *int64 - - // The media type of the layer, such as - // application/vnd.docker.image.rootfs.diff.tar.gzip or - // application/vnd.oci.image.layer.v1.tar+gzip . - MediaType *string - - noSmithyDocumentSerde -} - -// An object representing an Amazon ECR image layer failure. -type LayerFailure struct { - - // The failure code associated with the failure. - FailureCode LayerFailureCode - - // The reason for the failure. - FailureReason *string - - // The layer digest associated with the failure. - LayerDigest *string - - noSmithyDocumentSerde -} - -// The filter for the lifecycle policy preview. -type LifecyclePolicyPreviewFilter struct { - - // The tag status of the image. - TagStatus TagStatus - - noSmithyDocumentSerde -} - -// The result of the lifecycle policy preview. -type LifecyclePolicyPreviewResult struct { - - // The type of action to be taken. - Action *LifecyclePolicyRuleAction - - // The priority of the applied rule. - AppliedRulePriority *int32 - - // The sha256 digest of the image manifest. - ImageDigest *string - - // The date and time, expressed in standard JavaScript date format, at which the - // current image was pushed to the repository. - ImagePushedAt *time.Time - - // The list of tags associated with this image. - ImageTags []string - - noSmithyDocumentSerde -} - -// The summary of the lifecycle policy preview request. -type LifecyclePolicyPreviewSummary struct { - - // The number of expiring images. - ExpiringImageTotalCount *int32 - - noSmithyDocumentSerde -} - -// The type of action to be taken. -type LifecyclePolicyRuleAction struct { - - // The type of action to be taken. - Type ImageActionType - - noSmithyDocumentSerde -} - -// An object representing a filter on a ListImages operation. -type ListImagesFilter struct { - - // The tag status with which to filter your ListImages results. You can filter - // results based on whether they are TAGGED or UNTAGGED . - TagStatus TagStatus - - noSmithyDocumentSerde -} - -// Information about a package vulnerability finding. -type PackageVulnerabilityDetails struct { - - // An object that contains details about the CVSS score of a finding. - Cvss []CvssScore - - // One or more URLs that contain details about this vulnerability type. - ReferenceUrls []string - - // One or more vulnerabilities related to the one identified in this finding. - RelatedVulnerabilities []string - - // The source of the vulnerability information. - Source *string - - // A URL to the source of the vulnerability information. - SourceUrl *string - - // The date and time that this vulnerability was first added to the vendor's - // database. - VendorCreatedAt *time.Time - - // The severity the vendor has given to this vulnerability type. 
- VendorSeverity *string - - // The date and time the vendor last updated this vulnerability in their database. - VendorUpdatedAt *time.Time - - // The ID given to this vulnerability. - VulnerabilityId *string - - // The packages impacted by this vulnerability. - VulnerablePackages []VulnerablePackage - - noSmithyDocumentSerde -} - -// The details of a pull through cache rule. -type PullThroughCacheRule struct { - - // The date and time the pull through cache was created. - CreatedAt *time.Time - - // The ARN of the Secrets Manager secret associated with the pull through cache - // rule. - CredentialArn *string - - // The Amazon ECR repository prefix associated with the pull through cache rule. - EcrRepositoryPrefix *string - - // The Amazon Web Services account ID associated with the registry the pull - // through cache rule is associated with. - RegistryId *string - - // The date and time, in JavaScript date format, when the pull through cache rule - // was last updated. - UpdatedAt *time.Time - - // The name of the upstream source registry associated with the pull through cache - // rule. - UpstreamRegistry UpstreamRegistry - - // The upstream registry URL associated with the pull through cache rule. - UpstreamRegistryUrl *string - - noSmithyDocumentSerde -} - -// Details about the recommended course of action to remediate the finding. -type Recommendation struct { - - // The recommended course of action to remediate the finding. - Text *string - - // The URL address to the CVE remediation recommendations. - Url *string - - noSmithyDocumentSerde -} - -// The scanning configuration for a private registry. -type RegistryScanningConfiguration struct { - - // The scanning rules associated with the registry. - Rules []RegistryScanningRule - - // The type of scanning configured for the registry. - ScanType ScanType - - noSmithyDocumentSerde -} - -// The details of a scanning rule for a private registry. -type RegistryScanningRule struct { - - // The repository filters associated with the scanning configuration for a private - // registry. - // - // This member is required. - RepositoryFilters []ScanningRepositoryFilter - - // The frequency that scans are performed at for a private registry. When the - // ENHANCED scan type is specified, the supported scan frequencies are - // CONTINUOUS_SCAN and SCAN_ON_PUSH . When the BASIC scan type is specified, the - // SCAN_ON_PUSH scan frequency is supported. If scan on push is not specified, then - // the MANUAL scan frequency is set by default. - // - // This member is required. - ScanFrequency ScanFrequency - - noSmithyDocumentSerde -} - -// Information on how to remediate a finding. -type Remediation struct { - - // An object that contains information about the recommended course of action to - // remediate the finding. - Recommendation *Recommendation - - noSmithyDocumentSerde -} - -// The replication configuration for a registry. -type ReplicationConfiguration struct { - - // An array of objects representing the replication destinations and repository - // filters for a replication configuration. - // - // This member is required. - Rules []ReplicationRule - - noSmithyDocumentSerde -} - -// An array of objects representing the destination for a replication rule. -type ReplicationDestination struct { - - // The Region to replicate to. - // - // This member is required. - Region *string - - // The Amazon Web Services account ID of the Amazon ECR private registry to - // replicate to. 
When configuring cross-Region replication within your own - // registry, specify your own account ID. - // - // This member is required. - RegistryId *string - - noSmithyDocumentSerde -} - -// An array of objects representing the replication destinations and repository -// filters for a replication configuration. -type ReplicationRule struct { - - // An array of objects representing the destination for a replication rule. - // - // This member is required. - Destinations []ReplicationDestination - - // An array of objects representing the filters for a replication rule. Specifying - // a repository filter for a replication rule provides a method for controlling - // which repositories in a private registry are replicated. - RepositoryFilters []RepositoryFilter - - noSmithyDocumentSerde -} - -// An object representing a repository. -type Repository struct { - - // The date and time, in JavaScript date format, when the repository was created. - CreatedAt *time.Time - - // The encryption configuration for the repository. This determines how the - // contents of your repository are encrypted at rest. - EncryptionConfiguration *EncryptionConfiguration - - // The image scanning configuration for a repository. - ImageScanningConfiguration *ImageScanningConfiguration - - // The tag mutability setting for the repository. - ImageTagMutability ImageTagMutability - - // The Amazon Web Services account ID associated with the registry that contains - // the repository. - RegistryId *string - - // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains - // the arn:aws:ecr namespace, followed by the region of the repository, Amazon Web - // Services account ID of the repository owner, repository namespace, and - // repository name. For example, - // arn:aws:ecr:region:012345678910:repository-namespace/repository-name . - RepositoryArn *string - - // The name of the repository. - RepositoryName *string - - // The URI for the repository. You can use this URI for container image push and - // pull operations. - RepositoryUri *string - - noSmithyDocumentSerde -} - -// The filter settings used with image replication. Specifying a repository filter -// to a replication rule provides a method for controlling which repositories in a -// private registry are replicated. If no filters are added, the contents of all -// repositories are replicated. -type RepositoryFilter struct { - - // The repository filter details. When the PREFIX_MATCH filter type is specified, - // this value is required and should be the repository name prefix to configure - // replication for. - // - // This member is required. - Filter *string - - // The repository filter type. The only supported value is PREFIX_MATCH , which is - // a repository name prefix specified with the filter parameter. - // - // This member is required. - FilterType RepositoryFilterType - - noSmithyDocumentSerde -} - -// The details of the scanning configuration for a repository. -type RepositoryScanningConfiguration struct { - - // The scan filters applied to the repository. - AppliedScanFilters []ScanningRepositoryFilter - - // The ARN of the repository. - RepositoryArn *string - - // The name of the repository. - RepositoryName *string - - // The scan frequency for the repository. - ScanFrequency ScanFrequency - - // Whether or not scan on push is configured for the repository. - ScanOnPush bool - - noSmithyDocumentSerde -} - -// The details about any failures associated with the scanning configuration of a -// repository. 
-type RepositoryScanningConfigurationFailure struct { - - // The failure code. - FailureCode ScanningConfigurationFailureCode - - // The reason for the failure. - FailureReason *string - - // The name of the repository. - RepositoryName *string - - noSmithyDocumentSerde -} - -// Details about the resource involved in a finding. -type Resource struct { - - // An object that contains details about the resource involved in a finding. - Details *ResourceDetails - - // The ID of the resource. - Id *string - - // The tags attached to the resource. - Tags map[string]string - - // The type of resource. - Type *string - - noSmithyDocumentSerde -} - -// Contains details about the resource involved in the finding. -type ResourceDetails struct { - - // An object that contains details about the Amazon ECR container image involved - // in the finding. - AwsEcrContainerImage *AwsEcrContainerImageDetails - - noSmithyDocumentSerde -} - -// The details of a scanning repository filter. For more information on how to use -// filters, see Using filters (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#image-scanning-filters) -// in the Amazon Elastic Container Registry User Guide. -type ScanningRepositoryFilter struct { - - // The filter to use when scanning. - // - // This member is required. - Filter *string - - // The type associated with the filter. - // - // This member is required. - FilterType ScanningRepositoryFilterType - - noSmithyDocumentSerde -} - -// Information about the Amazon Inspector score given to a finding. -type ScoreDetails struct { - - // An object that contains details about the CVSS score given to a finding. - Cvss *CvssScoreDetails - - noSmithyDocumentSerde -} - -// The metadata to apply to a resource to help you categorize and organize them. -// Each tag consists of a key and a value, both of which you define. Tag keys can -// have a maximum character length of 128 characters, and tag values can have a -// maximum length of 256 characters. -type Tag struct { - - // One part of a key-value pair that make up a tag. A key is a general label that - // acts like a category for more specific tag values. - // - // This member is required. - Key *string - - // A value acts as a descriptor within a tag category (key). - // - // This member is required. - Value *string - - noSmithyDocumentSerde -} - -// Information on the vulnerable package identified by a finding. -type VulnerablePackage struct { - - // The architecture of the vulnerable package. - Arch *string - - // The epoch of the vulnerable package. - Epoch *int32 - - // The file path of the vulnerable package. - FilePath *string - - // The name of the vulnerable package. - Name *string - - // The package manager of the vulnerable package. - PackageManager *string - - // The release of the vulnerable package. - Release *string - - // The source layer hash of the vulnerable package. - SourceLayerHash *string - - // The version of the vulnerable package. - Version *string - - noSmithyDocumentSerde -} - -type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/validators.go deleted file mode 100644 index d9889a1148..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/validators.go +++ /dev/null @@ -1,1755 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
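The structs deleted above are plain data holders; the members marked "This member is required." are enforced only by the validators removed in the next hunk. As a rough sketch of how these types compose, assuming the standard aws.String pointer helper (the Region and repository prefix values are hypothetical; PREFIX_MATCH and the example account ID come from the doc comments above):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

func main() {
	// One replication rule sending matching repositories to one destination.
	// Region and RegistryId are the two required members of a
	// ReplicationDestination; Filter and FilterType are required on a
	// RepositoryFilter, and PREFIX_MATCH is its only supported filter type.
	cfg := types.ReplicationConfiguration{
		Rules: []types.ReplicationRule{{
			Destinations: []types.ReplicationDestination{{
				Region:     aws.String("us-west-2"),    // hypothetical destination Region
				RegistryId: aws.String("012345678910"), // account ID from the ARN example above
			}},
			RepositoryFilters: []types.RepositoryFilter{{
				Filter:     aws.String("prod"), // hypothetical repository name prefix
				FilterType: types.RepositoryFilterType("PREFIX_MATCH"),
			}},
		}},
	}
	fmt.Printf("%d replication rule(s) configured\n", len(cfg.Rules))
}
```

If any required member here were left nil, the validateReplicationConfiguration / validateReplicationDestination / validateRepositoryFilter chain deleted below would reject the value with a smithy.InvalidParamsError naming the missing parameter.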
- -package ecr - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpBatchCheckLayerAvailability struct { -} - -func (*validateOpBatchCheckLayerAvailability) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpBatchCheckLayerAvailability) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*BatchCheckLayerAvailabilityInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpBatchCheckLayerAvailabilityInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpBatchDeleteImage struct { -} - -func (*validateOpBatchDeleteImage) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpBatchDeleteImage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*BatchDeleteImageInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpBatchDeleteImageInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpBatchGetImage struct { -} - -func (*validateOpBatchGetImage) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpBatchGetImage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*BatchGetImageInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpBatchGetImageInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpBatchGetRepositoryScanningConfiguration struct { -} - -func (*validateOpBatchGetRepositoryScanningConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpBatchGetRepositoryScanningConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*BatchGetRepositoryScanningConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpBatchGetRepositoryScanningConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCompleteLayerUpload struct { -} - -func (*validateOpCompleteLayerUpload) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCompleteLayerUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CompleteLayerUploadInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := 
validateOpCompleteLayerUploadInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreatePullThroughCacheRule struct { -} - -func (*validateOpCreatePullThroughCacheRule) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreatePullThroughCacheRule) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreatePullThroughCacheRuleInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreatePullThroughCacheRuleInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateRepository struct { -} - -func (*validateOpCreateRepository) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateRepository) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateRepositoryInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateRepositoryInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteLifecyclePolicy struct { -} - -func (*validateOpDeleteLifecyclePolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteLifecyclePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteLifecyclePolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteLifecyclePolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeletePullThroughCacheRule struct { -} - -func (*validateOpDeletePullThroughCacheRule) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeletePullThroughCacheRule) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeletePullThroughCacheRuleInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeletePullThroughCacheRuleInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteRepository struct { -} - -func (*validateOpDeleteRepository) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteRepository) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteRepositoryInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteRepositoryInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) 
-} - -type validateOpDeleteRepositoryPolicy struct { -} - -func (*validateOpDeleteRepositoryPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteRepositoryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteRepositoryPolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteRepositoryPolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDescribeImageReplicationStatus struct { -} - -func (*validateOpDescribeImageReplicationStatus) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDescribeImageReplicationStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DescribeImageReplicationStatusInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDescribeImageReplicationStatusInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDescribeImageScanFindings struct { -} - -func (*validateOpDescribeImageScanFindings) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDescribeImageScanFindings) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DescribeImageScanFindingsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDescribeImageScanFindingsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDescribeImages struct { -} - -func (*validateOpDescribeImages) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDescribeImages) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DescribeImagesInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDescribeImagesInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetDownloadUrlForLayer struct { -} - -func (*validateOpGetDownloadUrlForLayer) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetDownloadUrlForLayer) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetDownloadUrlForLayerInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetDownloadUrlForLayerInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetLifecyclePolicy struct { -} - -func 
(*validateOpGetLifecyclePolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetLifecyclePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetLifecyclePolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetLifecyclePolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetLifecyclePolicyPreview struct { -} - -func (*validateOpGetLifecyclePolicyPreview) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetLifecyclePolicyPreview) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetLifecyclePolicyPreviewInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetLifecyclePolicyPreviewInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetRepositoryPolicy struct { -} - -func (*validateOpGetRepositoryPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetRepositoryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetRepositoryPolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetRepositoryPolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpInitiateLayerUpload struct { -} - -func (*validateOpInitiateLayerUpload) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpInitiateLayerUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*InitiateLayerUploadInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpInitiateLayerUploadInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListImages struct { -} - -func (*validateOpListImages) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListImages) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListImagesInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListImagesInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListTagsForResource struct { -} - -func (*validateOpListTagsForResource) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, 
next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListTagsForResourceInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListTagsForResourceInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutImage struct { -} - -func (*validateOpPutImage) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutImage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutImageInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutImageInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutImageScanningConfiguration struct { -} - -func (*validateOpPutImageScanningConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutImageScanningConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutImageScanningConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutImageScanningConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutImageTagMutability struct { -} - -func (*validateOpPutImageTagMutability) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutImageTagMutability) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutImageTagMutabilityInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutImageTagMutabilityInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutLifecyclePolicy struct { -} - -func (*validateOpPutLifecyclePolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutLifecyclePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutLifecyclePolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutLifecyclePolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutRegistryPolicy struct { -} - -func (*validateOpPutRegistryPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutRegistryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutRegistryPolicyInput) - if !ok { - 
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutRegistryPolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutRegistryScanningConfiguration struct { -} - -func (*validateOpPutRegistryScanningConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutRegistryScanningConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutRegistryScanningConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutRegistryScanningConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutReplicationConfiguration struct { -} - -func (*validateOpPutReplicationConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutReplicationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutReplicationConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutReplicationConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpSetRepositoryPolicy struct { -} - -func (*validateOpSetRepositoryPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpSetRepositoryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*SetRepositoryPolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpSetRepositoryPolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpStartImageScan struct { -} - -func (*validateOpStartImageScan) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpStartImageScan) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*StartImageScanInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpStartImageScanInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpStartLifecyclePolicyPreview struct { -} - -func (*validateOpStartLifecyclePolicyPreview) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpStartLifecyclePolicyPreview) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*StartLifecyclePolicyPreviewInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", 
in.Parameters) - } - if err := validateOpStartLifecyclePolicyPreviewInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpTagResource struct { -} - -func (*validateOpTagResource) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*TagResourceInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpTagResourceInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpUntagResource struct { -} - -func (*validateOpUntagResource) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*UntagResourceInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpUntagResourceInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpUpdatePullThroughCacheRule struct { -} - -func (*validateOpUpdatePullThroughCacheRule) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpUpdatePullThroughCacheRule) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*UpdatePullThroughCacheRuleInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpUpdatePullThroughCacheRuleInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpUploadLayerPart struct { -} - -func (*validateOpUploadLayerPart) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpUploadLayerPart) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*UploadLayerPartInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpUploadLayerPartInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpValidatePullThroughCacheRule struct { -} - -func (*validateOpValidatePullThroughCacheRule) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpValidatePullThroughCacheRule) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ValidatePullThroughCacheRuleInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpValidatePullThroughCacheRuleInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func 
addOpBatchCheckLayerAvailabilityValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpBatchCheckLayerAvailability{}, middleware.After) -} - -func addOpBatchDeleteImageValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpBatchDeleteImage{}, middleware.After) -} - -func addOpBatchGetImageValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpBatchGetImage{}, middleware.After) -} - -func addOpBatchGetRepositoryScanningConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpBatchGetRepositoryScanningConfiguration{}, middleware.After) -} - -func addOpCompleteLayerUploadValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCompleteLayerUpload{}, middleware.After) -} - -func addOpCreatePullThroughCacheRuleValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreatePullThroughCacheRule{}, middleware.After) -} - -func addOpCreateRepositoryValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateRepository{}, middleware.After) -} - -func addOpDeleteLifecyclePolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteLifecyclePolicy{}, middleware.After) -} - -func addOpDeletePullThroughCacheRuleValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeletePullThroughCacheRule{}, middleware.After) -} - -func addOpDeleteRepositoryValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteRepository{}, middleware.After) -} - -func addOpDeleteRepositoryPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteRepositoryPolicy{}, middleware.After) -} - -func addOpDescribeImageReplicationStatusValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDescribeImageReplicationStatus{}, middleware.After) -} - -func addOpDescribeImageScanFindingsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDescribeImageScanFindings{}, middleware.After) -} - -func addOpDescribeImagesValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDescribeImages{}, middleware.After) -} - -func addOpGetDownloadUrlForLayerValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetDownloadUrlForLayer{}, middleware.After) -} - -func addOpGetLifecyclePolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetLifecyclePolicy{}, middleware.After) -} - -func addOpGetLifecyclePolicyPreviewValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetLifecyclePolicyPreview{}, middleware.After) -} - -func addOpGetRepositoryPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetRepositoryPolicy{}, middleware.After) -} - -func addOpInitiateLayerUploadValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpInitiateLayerUpload{}, middleware.After) -} - -func addOpListImagesValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListImages{}, middleware.After) -} - -func 
addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After) -} - -func addOpPutImageValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutImage{}, middleware.After) -} - -func addOpPutImageScanningConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutImageScanningConfiguration{}, middleware.After) -} - -func addOpPutImageTagMutabilityValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutImageTagMutability{}, middleware.After) -} - -func addOpPutLifecyclePolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutLifecyclePolicy{}, middleware.After) -} - -func addOpPutRegistryPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutRegistryPolicy{}, middleware.After) -} - -func addOpPutRegistryScanningConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutRegistryScanningConfiguration{}, middleware.After) -} - -func addOpPutReplicationConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutReplicationConfiguration{}, middleware.After) -} - -func addOpSetRepositoryPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpSetRepositoryPolicy{}, middleware.After) -} - -func addOpStartImageScanValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpStartImageScan{}, middleware.After) -} - -func addOpStartLifecyclePolicyPreviewValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpStartLifecyclePolicyPreview{}, middleware.After) -} - -func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) -} - -func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) -} - -func addOpUpdatePullThroughCacheRuleValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpUpdatePullThroughCacheRule{}, middleware.After) -} - -func addOpUploadLayerPartValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpUploadLayerPart{}, middleware.After) -} - -func addOpValidatePullThroughCacheRuleValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpValidatePullThroughCacheRule{}, middleware.After) -} - -func validateEncryptionConfiguration(v *types.EncryptionConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "EncryptionConfiguration"} - if len(v.EncryptionType) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("EncryptionType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRegistryScanningRule(v *types.RegistryScanningRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RegistryScanningRule"} - if len(v.ScanFrequency) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("ScanFrequency")) - } - if v.RepositoryFilters == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryFilters")) - } else 
if v.RepositoryFilters != nil { - if err := validateScanningRepositoryFilterList(v.RepositoryFilters); err != nil { - invalidParams.AddNested("RepositoryFilters", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRegistryScanningRuleList(v []types.RegistryScanningRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RegistryScanningRuleList"} - for i := range v { - if err := validateRegistryScanningRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationConfiguration(v *types.ReplicationConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationConfiguration"} - if v.Rules == nil { - invalidParams.Add(smithy.NewErrParamRequired("Rules")) - } else if v.Rules != nil { - if err := validateReplicationRuleList(v.Rules); err != nil { - invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationDestination(v *types.ReplicationDestination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationDestination"} - if v.Region == nil { - invalidParams.Add(smithy.NewErrParamRequired("Region")) - } - if v.RegistryId == nil { - invalidParams.Add(smithy.NewErrParamRequired("RegistryId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationDestinationList(v []types.ReplicationDestination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationDestinationList"} - for i := range v { - if err := validateReplicationDestination(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationRule(v *types.ReplicationRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationRule"} - if v.Destinations == nil { - invalidParams.Add(smithy.NewErrParamRequired("Destinations")) - } else if v.Destinations != nil { - if err := validateReplicationDestinationList(v.Destinations); err != nil { - invalidParams.AddNested("Destinations", err.(smithy.InvalidParamsError)) - } - } - if v.RepositoryFilters != nil { - if err := validateRepositoryFilterList(v.RepositoryFilters); err != nil { - invalidParams.AddNested("RepositoryFilters", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationRuleList(v []types.ReplicationRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleList"} - for i := range v { - if err := validateReplicationRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRepositoryFilter(v *types.RepositoryFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RepositoryFilter"} - if 
v.Filter == nil { - invalidParams.Add(smithy.NewErrParamRequired("Filter")) - } - if len(v.FilterType) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("FilterType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRepositoryFilterList(v []types.RepositoryFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RepositoryFilterList"} - for i := range v { - if err := validateRepositoryFilter(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateScanningRepositoryFilter(v *types.ScanningRepositoryFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ScanningRepositoryFilter"} - if v.Filter == nil { - invalidParams.Add(smithy.NewErrParamRequired("Filter")) - } - if len(v.FilterType) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("FilterType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateScanningRepositoryFilterList(v []types.ScanningRepositoryFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ScanningRepositoryFilterList"} - for i := range v { - if err := validateScanningRepositoryFilter(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTag(v *types.Tag) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Tag"} - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.Value == nil { - invalidParams.Add(smithy.NewErrParamRequired("Value")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTagList(v []types.Tag) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TagList"} - for i := range v { - if err := validateTag(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpBatchCheckLayerAvailabilityInput(v *BatchCheckLayerAvailabilityInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "BatchCheckLayerAvailabilityInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.LayerDigests == nil { - invalidParams.Add(smithy.NewErrParamRequired("LayerDigests")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpBatchDeleteImageInput(v *BatchDeleteImageInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "BatchDeleteImageInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.ImageIds == nil { - invalidParams.Add(smithy.NewErrParamRequired("ImageIds")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpBatchGetImageInput(v *BatchGetImageInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "BatchGetImageInput"} - if 
v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.ImageIds == nil { - invalidParams.Add(smithy.NewErrParamRequired("ImageIds")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpBatchGetRepositoryScanningConfigurationInput(v *BatchGetRepositoryScanningConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "BatchGetRepositoryScanningConfigurationInput"} - if v.RepositoryNames == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryNames")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCompleteLayerUploadInput(v *CompleteLayerUploadInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CompleteLayerUploadInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if v.LayerDigests == nil { - invalidParams.Add(smithy.NewErrParamRequired("LayerDigests")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreatePullThroughCacheRuleInput(v *CreatePullThroughCacheRuleInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreatePullThroughCacheRuleInput"} - if v.EcrRepositoryPrefix == nil { - invalidParams.Add(smithy.NewErrParamRequired("EcrRepositoryPrefix")) - } - if v.UpstreamRegistryUrl == nil { - invalidParams.Add(smithy.NewErrParamRequired("UpstreamRegistryUrl")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateRepositoryInput(v *CreateRepositoryInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateRepositoryInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.Tags != nil { - if err := validateTagList(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if v.EncryptionConfiguration != nil { - if err := validateEncryptionConfiguration(v.EncryptionConfiguration); err != nil { - invalidParams.AddNested("EncryptionConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteLifecyclePolicyInput(v *DeleteLifecyclePolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteLifecyclePolicyInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeletePullThroughCacheRuleInput(v *DeletePullThroughCacheRuleInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeletePullThroughCacheRuleInput"} - if v.EcrRepositoryPrefix == nil { - invalidParams.Add(smithy.NewErrParamRequired("EcrRepositoryPrefix")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteRepositoryInput(v *DeleteRepositoryInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteRepositoryInput"} - if v.RepositoryName == nil 
{ - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteRepositoryPolicyInput(v *DeleteRepositoryPolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteRepositoryPolicyInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDescribeImageReplicationStatusInput(v *DescribeImageReplicationStatusInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DescribeImageReplicationStatusInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.ImageId == nil { - invalidParams.Add(smithy.NewErrParamRequired("ImageId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDescribeImageScanFindingsInput(v *DescribeImageScanFindingsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DescribeImageScanFindingsInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.ImageId == nil { - invalidParams.Add(smithy.NewErrParamRequired("ImageId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDescribeImagesInput(v *DescribeImagesInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DescribeImagesInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetDownloadUrlForLayerInput(v *GetDownloadUrlForLayerInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetDownloadUrlForLayerInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.LayerDigest == nil { - invalidParams.Add(smithy.NewErrParamRequired("LayerDigest")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetLifecyclePolicyInput(v *GetLifecyclePolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetLifecyclePolicyInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetLifecyclePolicyPreviewInput(v *GetLifecyclePolicyPreviewInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetLifecyclePolicyPreviewInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetRepositoryPolicyInput(v *GetRepositoryPolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetRepositoryPolicyInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func 
validateOpInitiateLayerUploadInput(v *InitiateLayerUploadInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InitiateLayerUploadInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListImagesInput(v *ListImagesInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListImagesInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListTagsForResourceInput"} - if v.ResourceArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutImageInput(v *PutImageInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutImageInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.ImageManifest == nil { - invalidParams.Add(smithy.NewErrParamRequired("ImageManifest")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutImageScanningConfigurationInput(v *PutImageScanningConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutImageScanningConfigurationInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.ImageScanningConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("ImageScanningConfiguration")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutImageTagMutabilityInput(v *PutImageTagMutabilityInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutImageTagMutabilityInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if len(v.ImageTagMutability) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("ImageTagMutability")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutLifecyclePolicyInput(v *PutLifecyclePolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutLifecyclePolicyInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.LifecyclePolicyText == nil { - invalidParams.Add(smithy.NewErrParamRequired("LifecyclePolicyText")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutRegistryPolicyInput(v *PutRegistryPolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutRegistryPolicyInput"} - if v.PolicyText == nil { - invalidParams.Add(smithy.NewErrParamRequired("PolicyText")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutRegistryScanningConfigurationInput(v *PutRegistryScanningConfigurationInput) error { - if v 
== nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutRegistryScanningConfigurationInput"} - if v.Rules != nil { - if err := validateRegistryScanningRuleList(v.Rules); err != nil { - invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutReplicationConfigurationInput(v *PutReplicationConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutReplicationConfigurationInput"} - if v.ReplicationConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("ReplicationConfiguration")) - } else if v.ReplicationConfiguration != nil { - if err := validateReplicationConfiguration(v.ReplicationConfiguration); err != nil { - invalidParams.AddNested("ReplicationConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpSetRepositoryPolicyInput(v *SetRepositoryPolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "SetRepositoryPolicyInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.PolicyText == nil { - invalidParams.Add(smithy.NewErrParamRequired("PolicyText")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpStartImageScanInput(v *StartImageScanInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "StartImageScanInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.ImageId == nil { - invalidParams.Add(smithy.NewErrParamRequired("ImageId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpStartLifecyclePolicyPreviewInput(v *StartLifecyclePolicyPreviewInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "StartLifecyclePolicyPreviewInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpTagResourceInput(v *TagResourceInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} - if v.ResourceArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) - } - if v.Tags == nil { - invalidParams.Add(smithy.NewErrParamRequired("Tags")) - } else if v.Tags != nil { - if err := validateTagList(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpUntagResourceInput(v *UntagResourceInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} - if v.ResourceArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) - } - if v.TagKeys == nil { - invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpUpdatePullThroughCacheRuleInput(v *UpdatePullThroughCacheRuleInput) error { - if v == nil { - return nil - } - invalidParams := 
smithy.InvalidParamsError{Context: "UpdatePullThroughCacheRuleInput"} - if v.EcrRepositoryPrefix == nil { - invalidParams.Add(smithy.NewErrParamRequired("EcrRepositoryPrefix")) - } - if v.CredentialArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("CredentialArn")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpUploadLayerPartInput(v *UploadLayerPartInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "UploadLayerPartInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if v.PartFirstByte == nil { - invalidParams.Add(smithy.NewErrParamRequired("PartFirstByte")) - } - if v.PartLastByte == nil { - invalidParams.Add(smithy.NewErrParamRequired("PartLastByte")) - } - if v.LayerPartBlob == nil { - invalidParams.Add(smithy.NewErrParamRequired("LayerPartBlob")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpValidatePullThroughCacheRuleInput(v *ValidatePullThroughCacheRuleInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ValidatePullThroughCacheRuleInput"} - if v.EcrRepositoryPrefix == nil { - invalidParams.Add(smithy.NewErrParamRequired("EcrRepositoryPrefix")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md deleted file mode 100644 index 8abb8388d4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md +++ /dev/null @@ -1,383 +0,0 @@ -# v1.23.3 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.2 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.22.3 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.2 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.22.1 (2024-02-15) - -* **Bug Fix**: Correct failure to determine the error type in awsJson services that could occur when errors were modeled with a non-string `code` field. - -# v1.22.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.6 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.5 (2023-12-08) - -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. 
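The v1.21.5 and v1.20.4 entries here describe how the client's default Retryer interacts with RetryMaxAttempts and RetryMode set through functional options. A minimal sketch of that configuration from the caller's side — the import paths follow the mirrored module paths used throughout this vendor tree, and the region, attempt count, and retry mode are illustrative, not prescribed by the changelog:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

func main() {
	// ECR Public is served from us-east-1.
	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}

	// Overrides applied through functional options: per the entries above, the
	// default Retryer is still installed, but it respects the attempt count and
	// retry mode set here. The values are examples only.
	client := ecrpublic.NewFromConfig(cfg, func(o *ecrpublic.Options) {
		o.RetryMaxAttempts = 5
		o.RetryMode = aws.RetryModeAdaptive
	})
	_ = client
}
```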
- -# v1.21.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. - -# v1.21.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. -* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.4 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.20.3 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.2 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.1 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2023-09-18) - -* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and makes region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. - -# v1.17.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2023-08-01) - -* No change notes available for this release. - -# v1.17.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.4 (2023-06-15) - -* No change notes available for this release. - -# v1.16.3 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2023-05-04) - -* No change notes available for this release. - -# v1.16.1 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2023-04-11) - -* **Feature**: This release will allow using a registry alias as the registryId in BatchDeleteImage requests. - -# v1.15.8 (2023-04-10) - -* No change notes available for this release. - -# v1.15.7 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.6 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.5 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.4 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. - -# v1.15.3 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2023-02-15) - -* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. -* **Bug Fix**: Correct error type parsing for restJson services. - -# v1.15.1 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2023-01-09) - -* **Feature**: This release for Amazon ECR Public makes several changes to bring the SDK into sync with the API. - -# v1.14.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - -# v1.13.22 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.21 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.20 (2022-11-30) - -* No change notes available for this release.
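The v1.16.0 entry above permits a registry alias in place of an account ID for BatchDeleteImage's registryId. A hedged sketch of such a call, using the input and output shapes from the vendored operation file removed later in this diff; the alias, repository name, and tag are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	client := ecrpublic.NewFromConfig(cfg)

	// Images are addressed by tag or digest; since v1.16.0, RegistryId may be a
	// registry alias instead of an AWS account ID. All identifiers here are
	// placeholders.
	out, err := client.BatchDeleteImage(context.TODO(), &ecrpublic.BatchDeleteImageInput{
		RegistryId:     aws.String("my-alias"),
		RepositoryName: aws.String("nginx-web-app"),
		ImageIds:       []types.ImageIdentifier{{ImageTag: aws.String("v1.0.0")}},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Partial failures come back in the Failures slice rather than as an error.
	for _, f := range out.Failures {
		log.Printf("delete failed: %s", aws.ToString(f.FailureReason))
	}
}
```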
- -# v1.13.19 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.18 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.17 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.16 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.15 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.14 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adds a new retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2021-12-21) - -* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. - -# v1.8.2 (2021-12-02) - -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client.
([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-11-12) - -* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. - -# v1.7.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.2 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-08-27) - -* **Feature**: Updated API model to latest revision. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go deleted file mode 100644 index b3758ccf43..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go +++ /dev/null @@ -1,538 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ecrpublic - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - smithy "github.com/aws/smithy-go" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "time" -) - -const ServiceID = "ECR PUBLIC" -const ServiceAPIVersion = "2020-10-30" - -// Client provides the API client to make operations call for Amazon Elastic -// Container Registry Public. -type Client struct { - options Options -} - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveEndpointResolverV2(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - wrapWithAnonymousAuth(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. Config overrides should instead be made on a per-operation basis through -// functional options. 
-func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { - ctx = middleware.ClearStackValues(ctx) - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) - if err != nil { - err = &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - return result, metadata, err -} - -type operationInputKey struct{} - -func setOperationInput(ctx context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" -} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in 
middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - -func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, optFns...) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) - - default: - o.Retryer = retry.NewStandard(standardOptions...) 
- } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ecrpublic", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", 
middleware.After) -} - -func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) - -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) - -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go deleted file mode 100644 index ab4cb1e548..0000000000 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go +++ /dev/null @@ -1,155 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Checks the availability of one or more image layers that are within a -// repository in a public registry. When an image is pushed to a repository, each -// image layer is checked to verify if it has been uploaded before. If it has been -// uploaded, then the image layer is skipped. This operation is used by the Amazon -// ECR proxy and is not generally used by customers for pulling and pushing images. -// In most cases, you should use the docker CLI to pull, tag, and push images. -func (c *Client) BatchCheckLayerAvailability(ctx context.Context, params *BatchCheckLayerAvailabilityInput, optFns ...func(*Options)) (*BatchCheckLayerAvailabilityOutput, error) { - if params == nil { - params = &BatchCheckLayerAvailabilityInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "BatchCheckLayerAvailability", params, optFns, c.addOperationBatchCheckLayerAvailabilityMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*BatchCheckLayerAvailabilityOutput) - out.ResultMetadata = metadata - return out, nil -} - -type BatchCheckLayerAvailabilityInput struct { - - // The digests of the image layers to check. - // - // This member is required. - LayerDigests []string - - // The name of the repository that's associated with the image layers to check. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID, or registry alias, associated with the - // public registry that contains the image layers to check. If you do not specify a - // registry, the default public registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type BatchCheckLayerAvailabilityOutput struct { - - // Any failures associated with the call. - Failures []types.LayerFailure - - // A list of image layer objects that correspond to the image layer references in - // the request. - Layers []types.Layer - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationBatchCheckLayerAvailabilityMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchCheckLayerAvailability{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchCheckLayerAvailability{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "BatchCheckLayerAvailability"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpBatchCheckLayerAvailabilityValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchCheckLayerAvailability(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opBatchCheckLayerAvailability(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "BatchCheckLayerAvailability", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go deleted file mode 100644 index 4ff2a07ef7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go +++ /dev/null @@ -1,155 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes a list of specified images that are within a repository in a public -// registry. Images are specified with either an imageTag or imageDigest . You can -// remove a tag from an image by specifying the image's tag in your request. When -// you remove the last tag from an image, the image is deleted from your -// repository. You can completely delete an image (and all of its tags) by -// specifying the digest of the image in your request. -func (c *Client) BatchDeleteImage(ctx context.Context, params *BatchDeleteImageInput, optFns ...func(*Options)) (*BatchDeleteImageOutput, error) { - if params == nil { - params = &BatchDeleteImageInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "BatchDeleteImage", params, optFns, c.addOperationBatchDeleteImageMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*BatchDeleteImageOutput) - out.ResultMetadata = metadata - return out, nil -} - -type BatchDeleteImageInput struct { - - // A list of image ID references that correspond to images to delete. The format - // of the imageIds reference is imageTag=tag or imageDigest=digest . - // - // This member is required. - ImageIds []types.ImageIdentifier - - // The repository in a public registry that contains the image to delete. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID, or registry alias, that's associated with - // the registry that contains the image to delete. If you do not specify a - // registry, the default public registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type BatchDeleteImageOutput struct { - - // Any failures associated with the call. - Failures []types.ImageFailure - - // The image IDs of the deleted images. - ImageIds []types.ImageIdentifier - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationBatchDeleteImageMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchDeleteImage{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchDeleteImage{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "BatchDeleteImage"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpBatchDeleteImageValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchDeleteImage(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opBatchDeleteImage(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "BatchDeleteImage", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go deleted file mode 100644 index 7a95db650f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go +++ /dev/null @@ -1,167 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Informs Amazon ECR that the image layer upload is complete for a specified -// public registry, repository name, and upload ID. You can optionally provide a -// sha256 digest of the image layer for data validation purposes. 
When an image is -// pushed, the CompleteLayerUpload API is called once for each new image layer to -// verify that the upload is complete. This operation is used by the Amazon ECR -// proxy and is not generally used by customers for pulling and pushing images. In -// most cases, you should use the docker CLI to pull, tag, and push images. -func (c *Client) CompleteLayerUpload(ctx context.Context, params *CompleteLayerUploadInput, optFns ...func(*Options)) (*CompleteLayerUploadOutput, error) { - if params == nil { - params = &CompleteLayerUploadInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CompleteLayerUpload", params, optFns, c.addOperationCompleteLayerUploadMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CompleteLayerUploadOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CompleteLayerUploadInput struct { - - // The sha256 digest of the image layer. - // - // This member is required. - LayerDigests []string - - // The name of the repository in a public registry to associate with the image - // layer. - // - // This member is required. - RepositoryName *string - - // The upload ID from a previous InitiateLayerUpload operation to associate with - // the image layer. - // - // This member is required. - UploadId *string - - // The Amazon Web Services account ID, or registry alias, associated with the - // registry where layers are uploaded. If you do not specify a registry, the - // default public registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type CompleteLayerUploadOutput struct { - - // The sha256 digest of the image layer. - LayerDigest *string - - // The public registry ID that's associated with the request. - RegistryId *string - - // The repository name that's associated with the request. - RepositoryName *string - - // The upload ID that's associated with the layer. - UploadId *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCompleteLayerUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpCompleteLayerUpload{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCompleteLayerUpload{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CompleteLayerUpload"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpCompleteLayerUploadValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteLayerUpload(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opCompleteLayerUpload(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CompleteLayerUpload", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go deleted file mode 100644 index 812926059a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go +++ /dev/null @@ -1,155 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates a repository in a public registry. 
For more information, see Amazon ECR -// repositories (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) -// in the Amazon Elastic Container Registry User Guide. -func (c *Client) CreateRepository(ctx context.Context, params *CreateRepositoryInput, optFns ...func(*Options)) (*CreateRepositoryOutput, error) { - if params == nil { - params = &CreateRepositoryInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateRepository", params, optFns, c.addOperationCreateRepositoryMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateRepositoryOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateRepositoryInput struct { - - // The name to use for the repository. This appears publicly in the Amazon ECR - // Public Gallery. The repository name can be specified on its own (for example - // nginx-web-app ) or prepended with a namespace to group the repository into a - // category (for example project-a/nginx-web-app ). - // - // This member is required. - RepositoryName *string - - // The details about the repository that are publicly visible in the Amazon ECR - // Public Gallery. - CatalogData *types.RepositoryCatalogDataInput - - // The metadata that you apply to each repository to help categorize and organize - // your repositories. Each tag consists of a key and an optional value. You define - // both of them. Tag keys can have a maximum character length of 128 characters, - // and tag values can have a maximum length of 256 characters. - Tags []types.Tag - - noSmithyDocumentSerde -} - -type CreateRepositoryOutput struct { - - // The catalog data for a repository. This data is publicly visible in the Amazon - // ECR Public Gallery. - CatalogData *types.RepositoryCatalogData - - // The repository that was created. - Repository *types.Repository - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateRepositoryMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateRepository{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateRepository{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateRepository"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpCreateRepositoryValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateRepository(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opCreateRepository(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateRepository", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go deleted file mode 100644 index 1392b24526..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go +++ /dev/null @@ -1,148 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes a repository in a public registry. If the repository contains images, -// you must either manually delete all images in the repository or use the force -// option. 
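For the CreateRepository operation deleted above, a short sketch using the input shape shown in the diff; the repository name and tag are placeholders, and CatalogData is omitted so the example sticks to fields confirmed by the shapes above:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic/types"
)

func createPublicRepo(ctx context.Context, client *ecrpublic.Client) error {
	out, err := client.CreateRepository(ctx, &ecrpublic.CreateRepositoryInput{
		// Namespaced name, as the doc comment above suggests (placeholder).
		RepositoryName: aws.String("project-a/nginx-web-app"),
		Tags: []types.Tag{
			{Key: aws.String("team"), Value: aws.String("platform")}, // placeholder tag
		},
	})
	if err != nil {
		return err
	}
	fmt.Println("created:", aws.ToString(out.Repository.RepositoryUri))
	return nil
}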
This option deletes all images on your behalf before deleting the -// repository. -func (c *Client) DeleteRepository(ctx context.Context, params *DeleteRepositoryInput, optFns ...func(*Options)) (*DeleteRepositoryOutput, error) { - if params == nil { - params = &DeleteRepositoryInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteRepository", params, optFns, c.addOperationDeleteRepositoryMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteRepositoryOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteRepositoryInput struct { - - // The name of the repository to delete. - // - // This member is required. - RepositoryName *string - - // The force option can be used to delete a repository that contains images. If - // the force option is not used, the repository must be empty prior to deletion. - Force bool - - // The Amazon Web Services account ID that's associated with the public registry - // that contains the repository to delete. If you do not specify a registry, the - // default public registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type DeleteRepositoryOutput struct { - - // The repository that was deleted. - Repository *types.Repository - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteRepositoryMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteRepository{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteRepository{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteRepository"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpDeleteRepositoryValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRepository(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = 
addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opDeleteRepository(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteRepository", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go deleted file mode 100644 index fc16deb8a2..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go +++ /dev/null @@ -1,147 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes the repository policy that's associated with the specified repository. -func (c *Client) DeleteRepositoryPolicy(ctx context.Context, params *DeleteRepositoryPolicyInput, optFns ...func(*Options)) (*DeleteRepositoryPolicyOutput, error) { - if params == nil { - params = &DeleteRepositoryPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteRepositoryPolicy", params, optFns, c.addOperationDeleteRepositoryPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteRepositoryPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteRepositoryPolicyInput struct { - - // The name of the repository that's associated with the repository policy to - // delete. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID that's associated with the public registry - // that contains the repository policy to delete. If you do not specify a registry, - // the default public registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type DeleteRepositoryPolicyOutput struct { - - // The JSON repository policy that was deleted from the repository. - PolicyText *string - - // The registry ID that's associated with the request. - RegistryId *string - - // The repository name that's associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. 
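A sketch of the force-delete path the DeleteRepository doc comment above describes; without Force, the call fails unless the repository is already empty. Client and repository name are assumed placeholders as before:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

func deletePublicRepo(ctx context.Context, client *ecrpublic.Client) error {
	out, err := client.DeleteRepository(ctx, &ecrpublic.DeleteRepositoryInput{
		RepositoryName: aws.String("project-a/nginx-web-app"), // placeholder
		// Force deletes any remaining images on your behalf before
		// deleting the repository itself.
		Force: true,
	})
	if err != nil {
		return err
	}
	fmt.Println("deleted:", aws.ToString(out.Repository.RepositoryName))
	return nil
}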
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteRepositoryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteRepositoryPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteRepositoryPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteRepositoryPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpDeleteRepositoryPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRepositoryPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opDeleteRepositoryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteRepositoryPolicy", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go deleted file mode 100644 index fa2e122fe3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go +++ /dev/null @@ -1,261 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the image tag details for a repository in a public registry. 
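The DeleteRepositoryPolicy operation above echoes back the JSON policy document it removed; a minimal sketch under the same client assumption:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

func deleteRepoPolicy(ctx context.Context, client *ecrpublic.Client) error {
	out, err := client.DeleteRepositoryPolicy(ctx, &ecrpublic.DeleteRepositoryPolicyInput{
		RepositoryName: aws.String("project-a/nginx-web-app"), // placeholder
	})
	if err != nil {
		return err
	}
	// The response carries the policy text that was just deleted.
	fmt.Println("removed policy:", aws.ToString(out.PolicyText))
	return nil
}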
-func (c *Client) DescribeImageTags(ctx context.Context, params *DescribeImageTagsInput, optFns ...func(*Options)) (*DescribeImageTagsOutput, error) { - if params == nil { - params = &DescribeImageTagsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DescribeImageTags", params, optFns, c.addOperationDescribeImageTagsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DescribeImageTagsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DescribeImageTagsInput struct { - - // The name of the repository that contains the image tag details to describe. - // - // This member is required. - RepositoryName *string - - // The maximum number of repository results that's returned by DescribeImageTags - // in paginated output. When this parameter is used, DescribeImageTags only - // returns maxResults results in a single page along with a nextToken response - // element. You can see the remaining results of the initial request by sending - // another DescribeImageTags request with the returned nextToken value. This value - // can be between 1 and 1000. If this parameter isn't used, then DescribeImageTags - // returns up to 100 results and a nextToken value, if applicable. If you specify - // images with imageIds , you can't use this option. - MaxResults *int32 - - // The nextToken value that's returned from a previous paginated DescribeImageTags - // request where maxResults was used and the results exceeded the value of that - // parameter. Pagination continues from the end of the previous results that - // returned the nextToken value. If there are no more results to return, this - // value is null . If you specify images with imageIds , you can't use this option. - NextToken *string - - // The Amazon Web Services account ID that's associated with the public registry - // that contains the repository where images are described. If you do not specify a - // registry, the default public registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type DescribeImageTagsOutput struct { - - // The image tag details for the images in the requested repository. - ImageTagDetails []types.ImageTagDetail - - // The nextToken value to include in a future DescribeImageTags request. When the - // results of a DescribeImageTags request exceed maxResults , you can use this - // value to retrieve the next page of results. If there are no more results to - // return, this value is null . - NextToken *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDescribeImageTagsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeImageTags{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeImageTags{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeImageTags"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpDescribeImageTagsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageTags(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -// DescribeImageTagsAPIClient is a client that implements the DescribeImageTags -// operation. -type DescribeImageTagsAPIClient interface { - DescribeImageTags(context.Context, *DescribeImageTagsInput, ...func(*Options)) (*DescribeImageTagsOutput, error) -} - -var _ DescribeImageTagsAPIClient = (*Client)(nil) - -// DescribeImageTagsPaginatorOptions is the paginator options for DescribeImageTags -type DescribeImageTagsPaginatorOptions struct { - // The maximum number of repository results that's returned by DescribeImageTags - // in paginated output. When this parameter is used, DescribeImageTags only - // returns maxResults results in a single page along with a nextToken response - // element. You can see the remaining results of the initial request by sending - // another DescribeImageTags request with the returned nextToken value. This value - // can be between 1 and 1000. If this parameter isn't used, then DescribeImageTags - // returns up to 100 results and a nextToken value, if applicable. If you specify - // images with imageIds , you can't use this option. 
- Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// DescribeImageTagsPaginator is a paginator for DescribeImageTags -type DescribeImageTagsPaginator struct { - options DescribeImageTagsPaginatorOptions - client DescribeImageTagsAPIClient - params *DescribeImageTagsInput - nextToken *string - firstPage bool -} - -// NewDescribeImageTagsPaginator returns a new DescribeImageTagsPaginator -func NewDescribeImageTagsPaginator(client DescribeImageTagsAPIClient, params *DescribeImageTagsInput, optFns ...func(*DescribeImageTagsPaginatorOptions)) *DescribeImageTagsPaginator { - if params == nil { - params = &DescribeImageTagsInput{} - } - - options := DescribeImageTagsPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &DescribeImageTagsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *DescribeImageTagsPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next DescribeImageTags page. -func (p *DescribeImageTagsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeImageTagsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - result, err := p.client.DescribeImageTags(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func newServiceMetadataMiddleware_opDescribeImageTags(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DescribeImageTags", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go deleted file mode 100644 index 660e640a71..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go +++ /dev/null @@ -1,268 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns metadata that's related to the images in a repository in a public -// registry. Beginning with Docker version 1.9, the Docker client compresses image -// layers before pushing them to a V2 Docker registry. The output of the docker -// images command shows the uncompressed image size. Therefore, it might return a -// larger image size than the image sizes that are returned by DescribeImages . 
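The DescribeImageTags file above ships its own paginator; a sketch of walking every page with it. The page size and repository name are placeholders, and the ImageTag field name on types.ImageTagDetail is an assumption, not shown in this diff:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

func listImageTags(ctx context.Context, client *ecrpublic.Client) error {
	p := ecrpublic.NewDescribeImageTagsPaginator(client, &ecrpublic.DescribeImageTagsInput{
		RepositoryName: aws.String("project-a/nginx-web-app"), // placeholder
		MaxResults:     aws.Int32(100),                        // page size; valid range 1-1000
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, d := range page.ImageTagDetails {
			fmt.Println(aws.ToString(d.ImageTag)) // field name assumed on types.ImageTagDetail
		}
	}
	return nil
}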
-func (c *Client) DescribeImages(ctx context.Context, params *DescribeImagesInput, optFns ...func(*Options)) (*DescribeImagesOutput, error) { - if params == nil { - params = &DescribeImagesInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DescribeImages", params, optFns, c.addOperationDescribeImagesMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DescribeImagesOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DescribeImagesInput struct { - - // The repository that contains the images to describe. - // - // This member is required. - RepositoryName *string - - // The list of image IDs for the requested repository. - ImageIds []types.ImageIdentifier - - // The maximum number of repository results that's returned by DescribeImages in - // paginated output. When this parameter is used, DescribeImages only returns - // maxResults results in a single page along with a nextToken response element. - // You can see the remaining results of the initial request by sending another - // DescribeImages request with the returned nextToken value. This value can be - // between 1 and 1000. If this parameter isn't used, then DescribeImages returns - // up to 100 results and a nextToken value, if applicable. If you specify images - // with imageIds , you can't use this option. - MaxResults *int32 - - // The nextToken value that's returned from a previous paginated DescribeImages - // request where maxResults was used and the results exceeded the value of that - // parameter. Pagination continues from the end of the previous results that - // returned the nextToken value. If there are no more results to return, this - // value is null . If you specify images with imageIds , you can't use this option. - NextToken *string - - // The Amazon Web Services account ID that's associated with the public registry - // that contains the repository where images are described. If you do not specify a - // registry, the default public registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type DescribeImagesOutput struct { - - // A list of ImageDetail objects that contain data about the image. - ImageDetails []types.ImageDetail - - // The nextToken value to include in a future DescribeImages request. When the - // results of a DescribeImages request exceed maxResults , you can use this value - // to retrieve the next page of results. If there are no more results to return, - // this value is null . - NextToken *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeImages{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeImages{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeImages"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpDescribeImagesValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImages(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -// DescribeImagesAPIClient is a client that implements the DescribeImages -// operation. -type DescribeImagesAPIClient interface { - DescribeImages(context.Context, *DescribeImagesInput, ...func(*Options)) (*DescribeImagesOutput, error) -} - -var _ DescribeImagesAPIClient = (*Client)(nil) - -// DescribeImagesPaginatorOptions is the paginator options for DescribeImages -type DescribeImagesPaginatorOptions struct { - // The maximum number of repository results that's returned by DescribeImages in - // paginated output. When this parameter is used, DescribeImages only returns - // maxResults results in a single page along with a nextToken response element. - // You can see the remaining results of the initial request by sending another - // DescribeImages request with the returned nextToken value. This value can be - // between 1 and 1000. If this parameter isn't used, then DescribeImages returns - // up to 100 results and a nextToken value, if applicable. If you specify images - // with imageIds , you can't use this option. 
- Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// DescribeImagesPaginator is a paginator for DescribeImages -type DescribeImagesPaginator struct { - options DescribeImagesPaginatorOptions - client DescribeImagesAPIClient - params *DescribeImagesInput - nextToken *string - firstPage bool -} - -// NewDescribeImagesPaginator returns a new DescribeImagesPaginator -func NewDescribeImagesPaginator(client DescribeImagesAPIClient, params *DescribeImagesInput, optFns ...func(*DescribeImagesPaginatorOptions)) *DescribeImagesPaginator { - if params == nil { - params = &DescribeImagesInput{} - } - - options := DescribeImagesPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &DescribeImagesPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *DescribeImagesPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next DescribeImages page. -func (p *DescribeImagesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeImagesOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - result, err := p.client.DescribeImages(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func newServiceMetadataMiddleware_opDescribeImages(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DescribeImages", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go deleted file mode 100644 index a4a0d06c7d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go +++ /dev/null @@ -1,253 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns details for a public registry. 
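For DescribeImages, a sketch that filters by image ID, honoring the imageIds/maxResults exclusivity called out in the doc comment above; the tag is a placeholder. Note the size caveat from the same comment: reported sizes reflect compressed layers:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic/types"
)

func describeImage(ctx context.Context, client *ecrpublic.Client) error {
	out, err := client.DescribeImages(ctx, &ecrpublic.DescribeImagesInput{
		RepositoryName: aws.String("project-a/nginx-web-app"), // placeholder
		ImageIds: []types.ImageIdentifier{
			{ImageTag: aws.String("latest")}, // placeholder tag
		},
	})
	if err != nil {
		return err
	}
	for _, d := range out.ImageDetails {
		fmt.Println(aws.ToString(d.ImageDigest))
	}
	return nil
}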
-func (c *Client) DescribeRegistries(ctx context.Context, params *DescribeRegistriesInput, optFns ...func(*Options)) (*DescribeRegistriesOutput, error) { - if params == nil { - params = &DescribeRegistriesInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DescribeRegistries", params, optFns, c.addOperationDescribeRegistriesMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DescribeRegistriesOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DescribeRegistriesInput struct { - - // The maximum number of repository results that's returned by DescribeRegistries - // in paginated output. When this parameter is used, DescribeRegistries only - // returns maxResults results in a single page along with a nextToken response - // element. The remaining results of the initial request can be seen by sending - // another DescribeRegistries request with the returned nextToken value. This - // value can be between 1 and 1000. If this parameter isn't used, then - // DescribeRegistries returns up to 100 results and a nextToken value, if - // applicable. - MaxResults *int32 - - // The nextToken value that's returned from a previous paginated DescribeRegistries - // request where maxResults was used and the results exceeded the value of that - // parameter. Pagination continues from the end of the previous results that - // returned the nextToken value. If there are no more results to return, this - // value is null . This token should be treated as an opaque identifier that is - // only used to retrieve the next items in a list and not for other programmatic - // purposes. - NextToken *string - - noSmithyDocumentSerde -} - -type DescribeRegistriesOutput struct { - - // An object that contains the details for a public registry. - // - // This member is required. - Registries []types.Registry - - // The nextToken value to include in a future DescribeRepositories request. If the - // results of a DescribeRepositories request exceed maxResults , you can use this - // value to retrieve the next page of results. If there are no more results, this - // value is null . - NextToken *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDescribeRegistriesMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeRegistries{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeRegistries{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeRegistries"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRegistries(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -// DescribeRegistriesAPIClient is a client that implements the DescribeRegistries -// operation. -type DescribeRegistriesAPIClient interface { - DescribeRegistries(context.Context, *DescribeRegistriesInput, ...func(*Options)) (*DescribeRegistriesOutput, error) -} - -var _ DescribeRegistriesAPIClient = (*Client)(nil) - -// DescribeRegistriesPaginatorOptions is the paginator options for -// DescribeRegistries -type DescribeRegistriesPaginatorOptions struct { - // The maximum number of repository results that's returned by DescribeRegistries - // in paginated output. When this parameter is used, DescribeRegistries only - // returns maxResults results in a single page along with a nextToken response - // element. The remaining results of the initial request can be seen by sending - // another DescribeRegistries request with the returned nextToken value. This - // value can be between 1 and 1000. If this parameter isn't used, then - // DescribeRegistries returns up to 100 results and a nextToken value, if - // applicable. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. 
- StopOnDuplicateToken bool -} - -// DescribeRegistriesPaginator is a paginator for DescribeRegistries -type DescribeRegistriesPaginator struct { - options DescribeRegistriesPaginatorOptions - client DescribeRegistriesAPIClient - params *DescribeRegistriesInput - nextToken *string - firstPage bool -} - -// NewDescribeRegistriesPaginator returns a new DescribeRegistriesPaginator -func NewDescribeRegistriesPaginator(client DescribeRegistriesAPIClient, params *DescribeRegistriesInput, optFns ...func(*DescribeRegistriesPaginatorOptions)) *DescribeRegistriesPaginator { - if params == nil { - params = &DescribeRegistriesInput{} - } - - options := DescribeRegistriesPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &DescribeRegistriesPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *DescribeRegistriesPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next DescribeRegistries page. -func (p *DescribeRegistriesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeRegistriesOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - result, err := p.client.DescribeRegistries(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func newServiceMetadataMiddleware_opDescribeRegistries(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DescribeRegistries", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go deleted file mode 100644 index 4646c67603..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go +++ /dev/null @@ -1,263 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Describes repositories that are in a public registry. 
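A paginator sketch for DescribeRegistries, which takes no required input; each types.Registry entry is printed whole rather than guessing at field names not shown in this diff:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

func listRegistries(ctx context.Context, client *ecrpublic.Client) error {
	p := ecrpublic.NewDescribeRegistriesPaginator(client, &ecrpublic.DescribeRegistriesInput{})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, r := range page.Registries {
			fmt.Printf("%+v\n", r) // dump the registry details verbatim
		}
	}
	return nil
}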
-func (c *Client) DescribeRepositories(ctx context.Context, params *DescribeRepositoriesInput, optFns ...func(*Options)) (*DescribeRepositoriesOutput, error) { - if params == nil { - params = &DescribeRepositoriesInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DescribeRepositories", params, optFns, c.addOperationDescribeRepositoriesMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DescribeRepositoriesOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DescribeRepositoriesInput struct { - - // The maximum number of repository results that's returned by DescribeRepositories - // in paginated output. When this parameter is used, DescribeRepositories only - // returns maxResults results in a single page along with a nextToken response - // element. You can see the remaining results of the initial request by sending - // another DescribeRepositories request with the returned nextToken value. This - // value can be between 1 and 1000. If this parameter isn't used, then - // DescribeRepositories returns up to 100 results and a nextToken value, if - // applicable. If you specify repositories with repositoryNames , you can't use - // this option. - MaxResults *int32 - - // The nextToken value that's returned from a previous paginated - // DescribeRepositories request where maxResults was used and the results exceeded - // the value of that parameter. Pagination continues from the end of the previous - // results that returned the nextToken value. If there are no more results to - // return, this value is null . If you specify repositories with repositoryNames , - // you can't use this option. This token should be treated as an opaque identifier - // that is only used to retrieve the next items in a list and not for other - // programmatic purposes. - NextToken *string - - // The Amazon Web Services account ID that's associated with the registry that - // contains the repositories to be described. If you do not specify a registry, the - // default public registry is assumed. - RegistryId *string - - // A list of repositories to describe. If this parameter is omitted, then all - // repositories in a registry are described. - RepositoryNames []string - - noSmithyDocumentSerde -} - -type DescribeRepositoriesOutput struct { - - // The nextToken value to include in a future DescribeRepositories request. When - // the results of a DescribeRepositories request exceed maxResults , this value can - // be used to retrieve the next page of results. If there are no more results to - // return, this value is null . - NextToken *string - - // A list of repository objects corresponding to valid repositories. - Repositories []types.Repository - - // Metadata pertaining to the operation's result. 
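The DescribeRepositories shapes just shown support both a single call and the paginator defined further down in this same deleted file; a single-call sketch follows (repository name is a placeholder; omit RepositoryNames to describe every repository in the registry):

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

func describeRepos(ctx context.Context, client *ecrpublic.Client) error {
	out, err := client.DescribeRepositories(ctx, &ecrpublic.DescribeRepositoriesInput{
		RepositoryNames: []string{"project-a/nginx-web-app"}, // placeholder
	})
	if err != nil {
		return err
	}
	for _, r := range out.Repositories {
		fmt.Println(aws.ToString(r.RepositoryName), aws.ToString(r.RepositoryUri))
	}
	return nil
}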
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDescribeRepositoriesMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeRepositories{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeRepositories{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeRepositories"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRepositories(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -// DescribeRepositoriesAPIClient is a client that implements the -// DescribeRepositories operation. -type DescribeRepositoriesAPIClient interface { - DescribeRepositories(context.Context, *DescribeRepositoriesInput, ...func(*Options)) (*DescribeRepositoriesOutput, error) -} - -var _ DescribeRepositoriesAPIClient = (*Client)(nil) - -// DescribeRepositoriesPaginatorOptions is the paginator options for -// DescribeRepositories -type DescribeRepositoriesPaginatorOptions struct { - // The maximum number of repository results that's returned by DescribeRepositories - // in paginated output. When this parameter is used, DescribeRepositories only - // returns maxResults results in a single page along with a nextToken response - // element. You can see the remaining results of the initial request by sending - // another DescribeRepositories request with the returned nextToken value. This - // value can be between 1 and 1000. If this parameter isn't used, then - // DescribeRepositories returns up to 100 results and a nextToken value, if - // applicable. If you specify repositories with repositoryNames , you can't use - // this option. 
- Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// DescribeRepositoriesPaginator is a paginator for DescribeRepositories -type DescribeRepositoriesPaginator struct { - options DescribeRepositoriesPaginatorOptions - client DescribeRepositoriesAPIClient - params *DescribeRepositoriesInput - nextToken *string - firstPage bool -} - -// NewDescribeRepositoriesPaginator returns a new DescribeRepositoriesPaginator -func NewDescribeRepositoriesPaginator(client DescribeRepositoriesAPIClient, params *DescribeRepositoriesInput, optFns ...func(*DescribeRepositoriesPaginatorOptions)) *DescribeRepositoriesPaginator { - if params == nil { - params = &DescribeRepositoriesInput{} - } - - options := DescribeRepositoriesPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &DescribeRepositoriesPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *DescribeRepositoriesPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next DescribeRepositories page. -func (p *DescribeRepositoriesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeRepositoriesOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - result, err := p.client.DescribeRepositories(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func newServiceMetadataMiddleware_opDescribeRepositories(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DescribeRepositories", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go deleted file mode 100644 index 82d1e20ebf..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Retrieves an authorization token. An authorization token represents your IAM -// authentication credentials. You can use it to access any Amazon ECR registry -// that your IAM principal has access to. The authorization token is valid for 12 -// hours. This API requires the ecr-public:GetAuthorizationToken and -// sts:GetServiceBearerToken permissions. 
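Per the doc comment just above, GetAuthorizationToken returns a 12-hour credential tied to your IAM principal; the generated implementation follows in the deleted file. A sketch of fetching it (the AuthorizationToken field name on types.AuthorizationData is an assumption, not shown in this diff):

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

func getToken(ctx context.Context, client *ecrpublic.Client) error {
	// Requires ecr-public:GetAuthorizationToken and sts:GetServiceBearerToken.
	out, err := client.GetAuthorizationToken(ctx, &ecrpublic.GetAuthorizationTokenInput{})
	if err != nil {
		return err
	}
	fmt.Println(aws.ToString(out.AuthorizationData.AuthorizationToken)) // field name assumed
	return nil
}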
-func (c *Client) GetAuthorizationToken(ctx context.Context, params *GetAuthorizationTokenInput, optFns ...func(*Options)) (*GetAuthorizationTokenOutput, error) { - if params == nil { - params = &GetAuthorizationTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetAuthorizationToken", params, optFns, c.addOperationGetAuthorizationTokenMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetAuthorizationTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetAuthorizationTokenInput struct { - noSmithyDocumentSerde -} - -type GetAuthorizationTokenOutput struct { - - // An authorization token data object that corresponds to a public registry. - AuthorizationData *types.AuthorizationData - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetAuthorizationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetAuthorizationToken{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetAuthorizationToken{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetAuthorizationToken"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAuthorizationToken(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetAuthorizationToken(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetAuthorizationToken", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go deleted file mode 100644 index b6aea61170..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go +++ /dev/null @@ -1,129 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Retrieves catalog metadata for a public registry. -func (c *Client) GetRegistryCatalogData(ctx context.Context, params *GetRegistryCatalogDataInput, optFns ...func(*Options)) (*GetRegistryCatalogDataOutput, error) { - if params == nil { - params = &GetRegistryCatalogDataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetRegistryCatalogData", params, optFns, c.addOperationGetRegistryCatalogDataMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetRegistryCatalogDataOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetRegistryCatalogDataInput struct { - noSmithyDocumentSerde -} - -type GetRegistryCatalogDataOutput struct { - - // The catalog metadata for the public registry. - // - // This member is required. - RegistryCatalogData *types.RegistryCatalogData - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetRegistryCatalogDataMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetRegistryCatalogData{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetRegistryCatalogData{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetRegistryCatalogData"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRegistryCatalogData(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = 
addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetRegistryCatalogData(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetRegistryCatalogData", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go deleted file mode 100644 index b7fc89975e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go +++ /dev/null @@ -1,142 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Retrieve catalog metadata for a repository in a public registry. This metadata -// is displayed publicly in the Amazon ECR Public Gallery. -func (c *Client) GetRepositoryCatalogData(ctx context.Context, params *GetRepositoryCatalogDataInput, optFns ...func(*Options)) (*GetRepositoryCatalogDataOutput, error) { - if params == nil { - params = &GetRepositoryCatalogDataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetRepositoryCatalogData", params, optFns, c.addOperationGetRepositoryCatalogDataMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetRepositoryCatalogDataOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetRepositoryCatalogDataInput struct { - - // The name of the repository to retrieve the catalog metadata for. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID that's associated with the registry that - // contains the repositories to be described. If you do not specify a registry, the - // default public registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type GetRepositoryCatalogDataOutput struct { - - // The catalog metadata for the repository. - CatalogData *types.RepositoryCatalogData - - // Metadata pertaining to the operation's result. 
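Reviewer note: for context on what callers lose with this removal, here is a minimal sketch of how the GetRepositoryCatalogData operation deleted above would be invoked. The repository name is a hypothetical placeholder, and imports use the canonical github.com module paths rather than this repo's vendored github.com mirror.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

func main() {
	// ECR Public's API is served from us-east-1.
	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	client := ecrpublic.NewFromConfig(cfg)

	// "my-repo" is a hypothetical repository name.
	out, err := client.GetRepositoryCatalogData(context.TODO(), &ecrpublic.GetRepositoryCatalogDataInput{
		RepositoryName: aws.String("my-repo"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.ToString(out.CatalogData.Description))
}
```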
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetRepositoryCatalogDataMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetRepositoryCatalogData{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetRepositoryCatalogData{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetRepositoryCatalogData"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpGetRepositoryCatalogDataValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRepositoryCatalogData(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetRepositoryCatalogData(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetRepositoryCatalogData", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go deleted file mode 100644 index 677f128dac..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go +++ /dev/null @@ -1,147 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Retrieves the repository policy for the specified repository. 
-func (c *Client) GetRepositoryPolicy(ctx context.Context, params *GetRepositoryPolicyInput, optFns ...func(*Options)) (*GetRepositoryPolicyOutput, error) { - if params == nil { - params = &GetRepositoryPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetRepositoryPolicy", params, optFns, c.addOperationGetRepositoryPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetRepositoryPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetRepositoryPolicyInput struct { - - // The name of the repository with the policy to retrieve. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID that's associated with the public registry - // that contains the repository. If you do not specify a registry, the default - // public registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type GetRepositoryPolicyOutput struct { - - // The repository policy text that's associated with the repository. The policy - // text will be in JSON format. - PolicyText *string - - // The registry ID that's associated with the request. - RegistryId *string - - // The repository name that's associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetRepositoryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetRepositoryPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetRepositoryPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetRepositoryPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpGetRepositoryPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRepositoryPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if 
err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetRepositoryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetRepositoryPolicy", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go deleted file mode 100644 index c0d7d0f637..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go +++ /dev/null @@ -1,149 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Notifies Amazon ECR that you intend to upload an image layer. When an image is -// pushed, the InitiateLayerUpload API is called once for each image layer that -// hasn't already been uploaded. Whether an image layer uploads is determined by -// the BatchCheckLayerAvailability API action. This operation is used by the Amazon -// ECR proxy and is not generally used by customers for pulling and pushing images. -// In most cases, you should use the docker CLI to pull, tag, and push images. -func (c *Client) InitiateLayerUpload(ctx context.Context, params *InitiateLayerUploadInput, optFns ...func(*Options)) (*InitiateLayerUploadOutput, error) { - if params == nil { - params = &InitiateLayerUploadInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "InitiateLayerUpload", params, optFns, c.addOperationInitiateLayerUploadMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*InitiateLayerUploadOutput) - out.ResultMetadata = metadata - return out, nil -} - -type InitiateLayerUploadInput struct { - - // The name of the repository that you want to upload layers to. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID, or registry alias, that's associated with - // the registry to which you intend to upload layers. If you do not specify a - // registry, the default public registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type InitiateLayerUploadOutput struct { - - // The size, in bytes, that Amazon ECR expects future layer part uploads to be. - PartSize *int64 - - // The upload ID for the layer upload. This parameter is passed to further - // UploadLayerPart and CompleteLayerUpload operations. - UploadId *string - - // Metadata pertaining to the operation's result. 
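Similarly, a hedged sketch of reading a repository policy via the GetRepositoryPolicy operation whose deletion ends above; the helper name and the caller-supplied repository value are illustrative only.

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

// printRepositoryPolicy fetches and prints the policy document attached to a
// public repository; repo is a caller-supplied repository name.
func printRepositoryPolicy(ctx context.Context, client *ecrpublic.Client, repo string) error {
	out, err := client.GetRepositoryPolicy(ctx, &ecrpublic.GetRepositoryPolicyInput{
		RepositoryName: aws.String(repo),
	})
	if err != nil {
		return err
	}
	// PolicyText is JSON, per the deleted doc comment above.
	fmt.Println(aws.ToString(out.PolicyText))
	return nil
}
```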
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationInitiateLayerUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpInitiateLayerUpload{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpInitiateLayerUpload{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "InitiateLayerUpload"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpInitiateLayerUploadValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opInitiateLayerUpload(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opInitiateLayerUpload(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "InitiateLayerUpload", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go deleted file mode 100644 index 1350ecd3f9..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go +++ /dev/null @@ -1,137 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// List the tags for an Amazon ECR Public resource. 
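The InitiateLayerUpload deletion above documents the start of the push-side layer flow. A sketch of how a caller would have begun an upload and captured the UploadId and PartSize that later part uploads depend on; the helper name is an assumption.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

// startLayerUpload begins a layer upload and returns the upload ID that later
// UploadLayerPart calls must reference, plus the part size ECR expects.
func startLayerUpload(ctx context.Context, client *ecrpublic.Client, repo string) (string, int64, error) {
	out, err := client.InitiateLayerUpload(ctx, &ecrpublic.InitiateLayerUploadInput{
		RepositoryName: aws.String(repo),
	})
	if err != nil {
		return "", 0, err
	}
	return aws.ToString(out.UploadId), aws.ToInt64(out.PartSize), nil
}
```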
-func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { - if params == nil { - params = &ListTagsForResourceInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListTagsForResource", params, optFns, c.addOperationListTagsForResourceMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListTagsForResourceOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListTagsForResourceInput struct { - - // The Amazon Resource Name (ARN) that identifies the resource to list the tags - // for. Currently, the supported resource is an Amazon ECR Public repository. - // - // This member is required. - ResourceArn *string - - noSmithyDocumentSerde -} - -type ListTagsForResourceOutput struct { - - // The tags for the resource. - Tags []types.Tag - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpListTagsForResource{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListTagsForResource{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListTagsForResource"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpListTagsForResourceValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsForResource(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opListTagsForResource(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: 
ServiceID, - OperationName: "ListTagsForResource", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go deleted file mode 100644 index 55efd6e929..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go +++ /dev/null @@ -1,164 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates or updates the image manifest and tags that are associated with an -// image. When an image is pushed and all new image layers have been uploaded, the -// PutImage API is called once to create or update the image manifest and the tags -// that are associated with the image. This operation is used by the Amazon ECR -// proxy and is not generally used by customers for pulling and pushing images. In -// most cases, you should use the docker CLI to pull, tag, and push images. -func (c *Client) PutImage(ctx context.Context, params *PutImageInput, optFns ...func(*Options)) (*PutImageOutput, error) { - if params == nil { - params = &PutImageInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutImage", params, optFns, c.addOperationPutImageMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutImageOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutImageInput struct { - - // The image manifest that corresponds to the image to be uploaded. - // - // This member is required. - ImageManifest *string - - // The name of the repository where the image is put. - // - // This member is required. - RepositoryName *string - - // The image digest of the image manifest that corresponds to the image. - ImageDigest *string - - // The media type of the image manifest. If you push an image manifest that - // doesn't contain the mediaType field, you must specify the imageManifestMediaType - // in the request. - ImageManifestMediaType *string - - // The tag to associate with the image. This parameter is required for images that - // use the Docker Image Manifest V2 Schema 2 or Open Container Initiative (OCI) - // formats. - ImageTag *string - - // The Amazon Web Services account ID, or registry alias, that's associated with - // the public registry that contains the repository where the image is put. If you - // do not specify a registry, the default public registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type PutImageOutput struct { - - // Details of the image uploaded. - Image *types.Image - - // Metadata pertaining to the operation's result. 
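For the ListTagsForResource operation removed above, a sketch of enumerating a repository's tags by ARN (the only resource type the deleted doc comment says is supported); the helper name is illustrative.

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

// listRepositoryTags prints the key-value tags on a public repository ARN.
func listRepositoryTags(ctx context.Context, client *ecrpublic.Client, arn string) error {
	out, err := client.ListTagsForResource(ctx, &ecrpublic.ListTagsForResourceInput{
		ResourceArn: aws.String(arn),
	})
	if err != nil {
		return err
	}
	for _, tag := range out.Tags {
		fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
	}
	return nil
}
```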
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutImageMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutImage{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutImage{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutImage"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpPutImageValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutImage(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opPutImage(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutImage", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go deleted file mode 100644 index f54f633f2e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go +++ /dev/null @@ -1,135 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Create or update the catalog data for a public registry. 
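PutImage, deleted above, is the final step of a push: it registers the manifest once all layers are uploaded. A sketch under the assumption that the manifest bytes and media type come from whatever built the image; the helper name is not from the SDK.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

// putManifest registers an already-uploaded image's manifest under a tag,
// mirroring the final step of a push.
func putManifest(ctx context.Context, client *ecrpublic.Client, repo, tag, manifest, mediaType string) error {
	_, err := client.PutImage(ctx, &ecrpublic.PutImageInput{
		RepositoryName:         aws.String(repo),
		ImageManifest:          aws.String(manifest),
		ImageManifestMediaType: aws.String(mediaType),
		ImageTag:               aws.String(tag),
	})
	return err
}
```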
-func (c *Client) PutRegistryCatalogData(ctx context.Context, params *PutRegistryCatalogDataInput, optFns ...func(*Options)) (*PutRegistryCatalogDataOutput, error) { - if params == nil { - params = &PutRegistryCatalogDataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutRegistryCatalogData", params, optFns, c.addOperationPutRegistryCatalogDataMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutRegistryCatalogDataOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutRegistryCatalogDataInput struct { - - // The display name for a public registry. The display name is shown as the - // repository author in the Amazon ECR Public Gallery. The registry display name is - // only publicly visible in the Amazon ECR Public Gallery for verified accounts. - DisplayName *string - - noSmithyDocumentSerde -} - -type PutRegistryCatalogDataOutput struct { - - // The catalog data for the public registry. - // - // This member is required. - RegistryCatalogData *types.RegistryCatalogData - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutRegistryCatalogDataMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutRegistryCatalogData{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutRegistryCatalogData{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutRegistryCatalogData"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutRegistryCatalogData(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opPutRegistryCatalogData(region string) *awsmiddleware.RegisterServiceMetadata { - 
return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutRegistryCatalogData", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go deleted file mode 100644 index 45d1382fb6..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go +++ /dev/null @@ -1,147 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates or updates the catalog data for a repository in a public registry. -func (c *Client) PutRepositoryCatalogData(ctx context.Context, params *PutRepositoryCatalogDataInput, optFns ...func(*Options)) (*PutRepositoryCatalogDataOutput, error) { - if params == nil { - params = &PutRepositoryCatalogDataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutRepositoryCatalogData", params, optFns, c.addOperationPutRepositoryCatalogDataMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutRepositoryCatalogDataOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutRepositoryCatalogDataInput struct { - - // An object containing the catalog data for a repository. This data is publicly - // visible in the Amazon ECR Public Gallery. - // - // This member is required. - CatalogData *types.RepositoryCatalogDataInput - - // The name of the repository to create or update the catalog data for. - // - // This member is required. - RepositoryName *string - - // The Amazon Web Services account ID that's associated with the public registry - // the repository is in. If you do not specify a registry, the default public - // registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type PutRepositoryCatalogDataOutput struct { - - // The catalog data for the repository. - CatalogData *types.RepositoryCatalogData - - // Metadata pertaining to the operation's result. 
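A one-call sketch of the PutRegistryCatalogData operation deleted above, which carries only the registry display name shown in the Amazon ECR Public Gallery; the helper name is assumed.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

// setRegistryDisplayName updates the display name for a public registry.
// Per the deleted doc comment, it is only publicly visible for verified accounts.
func setRegistryDisplayName(ctx context.Context, client *ecrpublic.Client, name string) error {
	_, err := client.PutRegistryCatalogData(ctx, &ecrpublic.PutRegistryCatalogDataInput{
		DisplayName: aws.String(name),
	})
	return err
}
```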
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutRepositoryCatalogDataMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutRepositoryCatalogData{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutRepositoryCatalogData{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutRepositoryCatalogData"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpPutRepositoryCatalogDataValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutRepositoryCatalogData(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opPutRepositoryCatalogData(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutRepositoryCatalogData", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go deleted file mode 100644 index d3a93d1d51..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go +++ /dev/null @@ -1,160 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Applies a repository policy to the specified public repository to control -// access permissions. 
For more information, see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html) -// in the Amazon Elastic Container Registry User Guide. -func (c *Client) SetRepositoryPolicy(ctx context.Context, params *SetRepositoryPolicyInput, optFns ...func(*Options)) (*SetRepositoryPolicyOutput, error) { - if params == nil { - params = &SetRepositoryPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "SetRepositoryPolicy", params, optFns, c.addOperationSetRepositoryPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*SetRepositoryPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type SetRepositoryPolicyInput struct { - - // The JSON repository policy text to apply to the repository. For more - // information, see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) - // in the Amazon Elastic Container Registry User Guide. - // - // This member is required. - PolicyText *string - - // The name of the repository to receive the policy. - // - // This member is required. - RepositoryName *string - - // If the policy that you want to set on a repository policy would prevent you - // from setting another policy in the future, you must force the - // SetRepositoryPolicy operation. This prevents accidental repository lockouts. - Force bool - - // The Amazon Web Services account ID that's associated with the registry that - // contains the repository. If you do not specify a registry, the default public - // registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type SetRepositoryPolicyOutput struct { - - // The JSON repository policy text that's applied to the repository. - PolicyText *string - - // The registry ID that's associated with the request. - RegistryId *string - - // The repository name that's associated with the request. - RepositoryName *string - - // Metadata pertaining to the operation's result. 
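The SetRepositoryPolicyInput above includes the Force guard against repository lockouts. A sketch of applying a policy with that flag surfaced to the caller; helper name and parameters are illustrative.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

// applyRepositoryPolicy sets a JSON policy document on a public repository.
// Force must be true only when the new policy could block future
// SetRepositoryPolicy calls; leaving it false guards against accidental lockouts.
func applyRepositoryPolicy(ctx context.Context, client *ecrpublic.Client, repo, policyJSON string, force bool) error {
	_, err := client.SetRepositoryPolicy(ctx, &ecrpublic.SetRepositoryPolicyInput{
		RepositoryName: aws.String(repo),
		PolicyText:     aws.String(policyJSON),
		Force:          force,
	})
	return err
}
```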
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationSetRepositoryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpSetRepositoryPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpSetRepositoryPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "SetRepositoryPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpSetRepositoryPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSetRepositoryPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opSetRepositoryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "SetRepositoryPolicy", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go deleted file mode 100644 index 4f68992313..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go +++ /dev/null @@ -1,143 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Associates the specified tags to a resource with the specified resourceArn . 
If -// existing tags on a resource aren't specified in the request parameters, they -// aren't changed. When a resource is deleted, the tags associated with that -// resource are also deleted. -func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { - if params == nil { - params = &TagResourceInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*TagResourceOutput) - out.ResultMetadata = metadata - return out, nil -} - -type TagResourceInput struct { - - // The Amazon Resource Name (ARN) of the resource to add tags to. Currently, the - // supported resource is an Amazon ECR Public repository. - // - // This member is required. - ResourceArn *string - - // The tags to add to the resource. A tag is an array of key-value pairs. Tag keys - // can have a maximum character length of 128 characters, and tag values can have a - // maximum length of 256 characters. - // - // This member is required. - Tags []types.Tag - - noSmithyDocumentSerde -} - -type TagResourceOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpTagResource{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpTagResource{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "TagResource"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpTagResourceValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = 
addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "TagResource", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go deleted file mode 100644 index 8ebfae032b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go +++ /dev/null @@ -1,137 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes specified tags from a resource. -func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { - if params == nil { - params = &UntagResourceInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*UntagResourceOutput) - out.ResultMetadata = metadata - return out, nil -} - -type UntagResourceInput struct { - - // The Amazon Resource Name (ARN) of the resource to delete tags from. Currently, - // the supported resource is an Amazon ECR Public repository. - // - // This member is required. - ResourceArn *string - - // The keys of the tags to be removed. - // - // This member is required. - TagKeys []string - - noSmithyDocumentSerde -} - -type UntagResourceOutput struct { - // Metadata pertaining to the operation's result. 
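TagResource and UntagResource, both deleted in this hunk, pair naturally. A sketch that swaps one tag key for another on a repository ARN; the helper and tag values are illustrative.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic/types"
)

// retagRepository removes oldKey and adds newKey=value on a repository ARN.
func retagRepository(ctx context.Context, client *ecrpublic.Client, arn, oldKey, newKey, value string) error {
	if _, err := client.UntagResource(ctx, &ecrpublic.UntagResourceInput{
		ResourceArn: aws.String(arn),
		TagKeys:     []string{oldKey},
	}); err != nil {
		return err
	}
	_, err := client.TagResource(ctx, &ecrpublic.TagResourceInput{
		ResourceArn: aws.String(arn),
		Tags:        []types.Tag{{Key: aws.String(newKey), Value: aws.String(value)}},
	})
	return err
}
```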
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpUntagResource{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUntagResource{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "UntagResource"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpUntagResourceValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "UntagResource", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go deleted file mode 100644 index bc5d65206f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go +++ /dev/null @@ -1,175 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Uploads an image layer part to Amazon ECR. When an image is pushed, each new -// image layer is uploaded in parts. The maximum size of each image layer part can -// be 20971520 bytes (about 20MB). 
The UploadLayerPart API is called once for each -// new image layer part. This operation is used by the Amazon ECR proxy and is not -// generally used by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. -func (c *Client) UploadLayerPart(ctx context.Context, params *UploadLayerPartInput, optFns ...func(*Options)) (*UploadLayerPartOutput, error) { - if params == nil { - params = &UploadLayerPartInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "UploadLayerPart", params, optFns, c.addOperationUploadLayerPartMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*UploadLayerPartOutput) - out.ResultMetadata = metadata - return out, nil -} - -type UploadLayerPartInput struct { - - // The base64-encoded layer part payload. - // - // This member is required. - LayerPartBlob []byte - - // The position of the first byte of the layer part within the overall image layer. - // - // This member is required. - PartFirstByte *int64 - - // The position of the last byte of the layer part within the overall image layer. - // - // This member is required. - PartLastByte *int64 - - // The name of the repository that you're uploading layer parts to. - // - // This member is required. - RepositoryName *string - - // The upload ID from a previous InitiateLayerUpload operation to associate with - // the layer part upload. - // - // This member is required. - UploadId *string - - // The Amazon Web Services account ID, or registry alias, that's associated with - // the registry that you're uploading layer parts to. If you do not specify a - // registry, the default public registry is assumed. - RegistryId *string - - noSmithyDocumentSerde -} - -type UploadLayerPartOutput struct { - - // The integer value of the last byte that's received in the request. - LastByteReceived *int64 - - // The registry ID that's associated with the request. - RegistryId *string - - // The repository name that's associated with the request. - RepositoryName *string - - // The upload ID that's associated with the request. - UploadId *string - - // Metadata pertaining to the operation's result.
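The UploadLayerPartInput above encodes each part's byte range explicitly. A sketch of the part loop a caller would run, assuming partSize came from the earlier InitiateLayerUpload response; the helper name is not from the SDK.

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

// uploadLayer streams a layer blob in fixed-size parts, tracking the inclusive
// byte range of each part as UploadLayerPart requires.
func uploadLayer(ctx context.Context, client *ecrpublic.Client, repo, uploadID string, blob []byte, partSize int64) error {
	if partSize <= 0 {
		return fmt.Errorf("partSize must be positive, got %d", partSize)
	}
	for first := int64(0); first < int64(len(blob)); first += partSize {
		last := first + partSize - 1
		if last >= int64(len(blob)) {
			last = int64(len(blob)) - 1
		}
		if _, err := client.UploadLayerPart(ctx, &ecrpublic.UploadLayerPartInput{
			RepositoryName: aws.String(repo),
			UploadId:       aws.String(uploadID),
			LayerPartBlob:  blob[first : last+1],
			PartFirstByte:  aws.Int64(first),
			PartLastByte:   aws.Int64(last),
		}); err != nil {
			return err
		}
	}
	return nil
}
```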
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationUploadLayerPartMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsjson11_serializeOpUploadLayerPart{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUploadLayerPart{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "UploadLayerPart"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpUploadLayerPartValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadLayerPart(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opUploadLayerPart(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "UploadLayerPart", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/auth.go deleted file mode 100644 index 80729fbc78..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/auth.go +++ /dev/null @@ -1,284 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ecrpublic - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The region in which the operation is being invoked. - Region string -} - -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthParamsRegion(params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. 
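The auth.go deletion above removes the exported AuthSchemeResolver hook on Options. A sketch of overriding it so every operation resolves to the anonymous scheme; the type and constructor names here are assumptions, while smithyauth.SchemeIDAnonymous is the same constant the deleted wrapWithAnonymousAuth appends.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
	smithyauth "github.com/aws/smithy-go/auth"
)

// anonymousOnly always resolves to the anonymous scheme, so requests go out
// unsigned; plausible for read-only calls against the public gallery.
type anonymousOnly struct{}

func (anonymousOnly) ResolveAuthSchemes(ctx context.Context, params *ecrpublic.AuthResolverParameters) ([]*smithyauth.Option, error) {
	return []*smithyauth.Option{{SchemeID: smithyauth.SchemeIDAnonymous}}, nil
}

// newAnonymousClient builds a client whose auth scheme resolution is
// overridden via the Options.AuthSchemeResolver field shown above.
func newAnonymousClient() *ecrpublic.Client {
	return ecrpublic.New(ecrpublic.Options{Region: "us-east-1"}, func(o *ecrpublic.Options) {
		o.AuthSchemeResolver = anonymousOnly{}
	})
}
```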
-type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "ecr-public") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - for _, option := range options { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { - return &resolvedAuthScheme{ - Scheme: scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := 
getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go deleted file mode 100644 index b6fbd266f6..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go +++ /dev/null @@ -1,7116 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ecrpublic - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - smithy "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithytime "github.com/aws/smithy-go/time" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "strings" -) - -type awsAwsjson11_deserializeOpBatchCheckLayerAvailability struct { -} - -func (*awsAwsjson11_deserializeOpBatchCheckLayerAvailability) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpBatchCheckLayerAvailability) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response, &metadata) - } - output := &BatchCheckLayerAvailabilityOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentBatchCheckLayerAvailabilityOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage 
= bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RegistryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRegistryNotFoundException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpBatchDeleteImage struct { -} - -func (*awsAwsjson11_deserializeOpBatchDeleteImage) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpBatchDeleteImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorBatchDeleteImage(response, &metadata) - } - output := &BatchDeleteImageOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentBatchDeleteImageOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorBatchDeleteImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - 
Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpCompleteLayerUpload struct { -} - -func (*awsAwsjson11_deserializeOpCompleteLayerUpload) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpCompleteLayerUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorCompleteLayerUpload(response, &metadata) - } - output := &CompleteLayerUploadOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentCompleteLayerUploadOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorCompleteLayerUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - 
decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("EmptyUploadException", errorCode): - return awsAwsjson11_deserializeErrorEmptyUploadException(response, errorBody) - - case strings.EqualFold("InvalidLayerException", errorCode): - return awsAwsjson11_deserializeErrorInvalidLayerException(response, errorBody) - - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("LayerAlreadyExistsException", errorCode): - return awsAwsjson11_deserializeErrorLayerAlreadyExistsException(response, errorBody) - - case strings.EqualFold("LayerPartTooSmallException", errorCode): - return awsAwsjson11_deserializeErrorLayerPartTooSmallException(response, errorBody) - - case strings.EqualFold("RegistryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRegistryNotFoundException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - case strings.EqualFold("UploadNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorUploadNotFoundException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpCreateRepository struct { -} - -func (*awsAwsjson11_deserializeOpCreateRepository) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpCreateRepository) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorCreateRepository(response, &metadata) - } - output := &CreateRepositoryOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: 
snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentCreateRepositoryOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorCreateRepository(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("InvalidTagParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) - - case strings.EqualFold("LimitExceededException", errorCode): - return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) - - case strings.EqualFold("RepositoryAlreadyExistsException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryAlreadyExistsException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("TooManyTagsException", errorCode): - return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) - - case strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDeleteRepository struct { -} - -func (*awsAwsjson11_deserializeOpDeleteRepository) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDeleteRepository) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || 
response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeleteRepository(response, &metadata) - } - output := &DeleteRepositoryOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDeleteRepositoryOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDeleteRepository(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotEmptyException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotEmptyException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDeleteRepositoryPolicy struct { -} - -func (*awsAwsjson11_deserializeOpDeleteRepositoryPolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDeleteRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, 
-) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response, &metadata) - } - output := &DeleteRepositoryPolicyOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDeleteRepositoryPolicyOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("RepositoryPolicyNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDescribeImages 
struct { -} - -func (*awsAwsjson11_deserializeOpDescribeImages) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDescribeImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeImages(response, &metadata) - } - output := &DescribeImagesOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDescribeImagesOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("ImageNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) - - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case 
strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDescribeImageTags struct { -} - -func (*awsAwsjson11_deserializeOpDescribeImageTags) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDescribeImageTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeImageTags(response, &metadata) - } - output := &DescribeImageTagsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDescribeImageTagsOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDescribeImageTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return 
awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDescribeRegistries struct { -} - -func (*awsAwsjson11_deserializeOpDescribeRegistries) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDescribeRegistries) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeRegistries(response, &metadata) - } - output := &DescribeRegistriesOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDescribeRegistriesOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDescribeRegistries(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", 
errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDescribeRepositories struct { -} - -func (*awsAwsjson11_deserializeOpDescribeRepositories) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDescribeRepositories) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeRepositories(response, &metadata) - } - output := &DescribeRepositoriesOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDescribeRepositoriesOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case 
strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpGetAuthorizationToken struct { -} - -func (*awsAwsjson11_deserializeOpGetAuthorizationToken) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpGetAuthorizationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response, &metadata) - } - output := &GetAuthorizationTokenOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentGetAuthorizationTokenOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok 
:= resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpGetRegistryCatalogData struct { -} - -func (*awsAwsjson11_deserializeOpGetRegistryCatalogData) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpGetRegistryCatalogData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetRegistryCatalogData(response, &metadata) - } - output := &GetRegistryCatalogDataOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentGetRegistryCatalogDataOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorGetRegistryCatalogData(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - 
- errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpGetRepositoryCatalogData struct { -} - -func (*awsAwsjson11_deserializeOpGetRepositoryCatalogData) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpGetRepositoryCatalogData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetRepositoryCatalogData(response, &metadata) - } - output := &GetRepositoryCatalogDataOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentGetRepositoryCatalogDataOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorGetRepositoryCatalogData(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - bodyInfo, err := getProtocolErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if typ, ok := resolveProtocolErrorType(headerCode, 
bodyInfo); ok { - errorCode = restjson.SanitizeErrorCode(typ) - } - if len(bodyInfo.Message) != 0 { - errorMessage = bodyInfo.Message - } - switch { - case strings.EqualFold("InvalidParameterException", errorCode): - return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) - - case strings.EqualFold("RepositoryCatalogDataNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryCatalogDataNotFoundException(response, errorBody) - - case strings.EqualFold("RepositoryNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) - - case strings.EqualFold("ServerException", errorCode): - return awsAwsjson11_deserializeErrorServerException(response, errorBody) - - case strings.EqualFold("UnsupportedCommandException", errorCode): - return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpGetRepositoryPolicy struct { -} - -func (*awsAwsjson11_deserializeOpGetRepositoryPolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpGetRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response, &metadata) - } - output := &GetRepositoryPolicyOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - 
-  bodyInfo, err := getProtocolErrorInfo(decoder)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-    errorCode = restjson.SanitizeErrorCode(typ)
-  }
-  if len(bodyInfo.Message) != 0 {
-    errorMessage = bodyInfo.Message
-  }
-  switch {
-  case strings.EqualFold("InvalidParameterException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-  case strings.EqualFold("RepositoryNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-  case strings.EqualFold("RepositoryPolicyNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException(response, errorBody)
-
-  case strings.EqualFold("ServerException", errorCode):
-    return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-  case strings.EqualFold("UnsupportedCommandException", errorCode):
-    return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody)
-
-  default:
-    genericError := &smithy.GenericAPIError{
-      Code:    errorCode,
-      Message: errorMessage,
-    }
-    return genericError
-
-  }
-}
-
-type awsAwsjson11_deserializeOpInitiateLayerUpload struct {
-}
-
-func (*awsAwsjson11_deserializeOpInitiateLayerUpload) ID() string {
-  return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpInitiateLayerUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-  out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-  out, metadata, err = next.HandleDeserialize(ctx, in)
-  if err != nil {
-    return out, metadata, err
-  }
-
-  response, ok := out.RawResponse.(*smithyhttp.Response)
-  if !ok {
-    return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-  }
-
-  if response.StatusCode < 200 || response.StatusCode >= 300 {
-    return out, metadata, awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response, &metadata)
-  }
-  output := &InitiateLayerUploadOutput{}
-  out.Result = output
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(response.Body, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  err = awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(&output, shape)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-  var errorBuffer bytes.Buffer
-  if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-    return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
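// The resolveProtocolErrorType / SanitizeErrorCode pair above normalizes the
// wire-level error code, which may arrive either in the X-Amzn-ErrorType
// header or in the body's code/__type field, sometimes in the long form
// "namespace#ErrorName:http://...". This sketch shows the assumed
// normalization steps; the real restjson.SanitizeErrorCode lives in smithy-go
// and may differ in detail.
package main

import (
	"fmt"
	"strings"
)

func sanitizeErrorCode(code string) string {
	if i := strings.Index(code, ":"); i >= 0 { // drop any trailing URI
		code = code[:i]
	}
	if i := strings.Index(code, "#"); i >= 0 { // drop the namespace prefix
		code = code[i+1:]
	}
	return code
}

// resolveErrorCode prefers the header, falls back to the body, then a default.
func resolveErrorCode(headerCode, bodyCode string) string {
	if headerCode != "" {
		return sanitizeErrorCode(headerCode)
	}
	if bodyCode != "" {
		return sanitizeErrorCode(bodyCode)
	}
	return "UnknownError"
}

func main() {
	fmt.Println(resolveErrorCode("aws.ecrpublic#RepositoryNotFoundException:http://internal", ""))
	// Output: RepositoryNotFoundException
}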
-  }
-  errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-  errorCode := "UnknownError"
-  errorMessage := errorCode
-
-  headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  bodyInfo, err := getProtocolErrorInfo(decoder)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-    errorCode = restjson.SanitizeErrorCode(typ)
-  }
-  if len(bodyInfo.Message) != 0 {
-    errorMessage = bodyInfo.Message
-  }
-  switch {
-  case strings.EqualFold("InvalidParameterException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-  case strings.EqualFold("RegistryNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRegistryNotFoundException(response, errorBody)
-
-  case strings.EqualFold("RepositoryNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-  case strings.EqualFold("ServerException", errorCode):
-    return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-  case strings.EqualFold("UnsupportedCommandException", errorCode):
-    return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody)
-
-  default:
-    genericError := &smithy.GenericAPIError{
-      Code:    errorCode,
-      Message: errorMessage,
-    }
-    return genericError
-
-  }
-}
-
-type awsAwsjson11_deserializeOpListTagsForResource struct {
-}
-
-func (*awsAwsjson11_deserializeOpListTagsForResource) ID() string {
-  return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-  out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-  out, metadata, err = next.HandleDeserialize(ctx, in)
-  if err != nil {
-    return out, metadata, err
-  }
-
-  response, ok := out.RawResponse.(*smithyhttp.Response)
-  if !ok {
-    return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-  }
-
-  if response.StatusCode < 200 || response.StatusCode >= 300 {
-    return out, metadata, awsAwsjson11_deserializeOpErrorListTagsForResource(response, &metadata)
-  }
-  output := &ListTagsForResourceOutput{}
-  out.Result = output
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(response.Body, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  err = awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(&output, shape)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-  var errorBuffer bytes.Buffer
-  if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-    return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-  }
-  errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-  errorCode := "UnknownError"
-  errorMessage := errorCode
-
-  headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  bodyInfo, err := getProtocolErrorInfo(decoder)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-    errorCode = restjson.SanitizeErrorCode(typ)
-  }
-  if len(bodyInfo.Message) != 0 {
-    errorMessage = bodyInfo.Message
-  }
-  switch {
-  case strings.EqualFold("InvalidParameterException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-  case strings.EqualFold("RepositoryNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-  case strings.EqualFold("ServerException", errorCode):
-    return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-  case strings.EqualFold("UnsupportedCommandException", errorCode):
-    return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody)
-
-  default:
-    genericError := &smithy.GenericAPIError{
-      Code:    errorCode,
-      Message: errorMessage,
-    }
-    return genericError
-
-  }
-}
-
-type awsAwsjson11_deserializeOpPutImage struct {
-}
-
-func (*awsAwsjson11_deserializeOpPutImage) ID() string {
-  return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpPutImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-  out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-  out, metadata, err = next.HandleDeserialize(ctx, in)
-  if err != nil {
-    return out, metadata, err
-  }
-
-  response, ok := out.RawResponse.(*smithyhttp.Response)
-  if !ok {
-    return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-  }
-
-  if response.StatusCode < 200 || response.StatusCode >= 300 {
-    return out, metadata, awsAwsjson11_deserializeOpErrorPutImage(response, &metadata)
-  }
-  output := &PutImageOutput{}
-  out.Result = output
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(response.Body, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  err = awsAwsjson11_deserializeOpDocumentPutImageOutput(&output, shape)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-  var errorBuffer bytes.Buffer
-  if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-    return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-  }
-  errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-  errorCode := "UnknownError"
-  errorMessage := errorCode
-
-  headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  bodyInfo, err := getProtocolErrorInfo(decoder)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-    errorCode = restjson.SanitizeErrorCode(typ)
-  }
-  if len(bodyInfo.Message) != 0 {
-    errorMessage = bodyInfo.Message
-  }
-  switch {
-  case strings.EqualFold("ImageAlreadyExistsException", errorCode):
-    return awsAwsjson11_deserializeErrorImageAlreadyExistsException(response, errorBody)
-
-  case strings.EqualFold("ImageDigestDoesNotMatchException", errorCode):
-    return awsAwsjson11_deserializeErrorImageDigestDoesNotMatchException(response, errorBody)
-
-  case strings.EqualFold("ImageTagAlreadyExistsException", errorCode):
-    return awsAwsjson11_deserializeErrorImageTagAlreadyExistsException(response, errorBody)
-
-  case strings.EqualFold("InvalidParameterException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-  case strings.EqualFold("LayersNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorLayersNotFoundException(response, errorBody)
-
-  case strings.EqualFold("LimitExceededException", errorCode):
-    return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody)
-
-  case strings.EqualFold("ReferencedImagesNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorReferencedImagesNotFoundException(response, errorBody)
-
-  case strings.EqualFold("RegistryNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRegistryNotFoundException(response, errorBody)
-
-  case strings.EqualFold("RepositoryNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-  case strings.EqualFold("ServerException", errorCode):
-    return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-  case strings.EqualFold("UnsupportedCommandException", errorCode):
-    return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody)
-
-  default:
-    genericError := &smithy.GenericAPIError{
-      Code:    errorCode,
-      Message: errorMessage,
-    }
-    return genericError
-
-  }
-}
-
-type awsAwsjson11_deserializeOpPutRegistryCatalogData struct {
-}
-
-func (*awsAwsjson11_deserializeOpPutRegistryCatalogData) ID() string {
-  return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpPutRegistryCatalogData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-  out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-  out, metadata, err = next.HandleDeserialize(ctx, in)
-  if err != nil {
-    return out, metadata, err
-  }
-
-  response, ok := out.RawResponse.(*smithyhttp.Response)
-  if !ok {
-    return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-  }
-
-  if response.StatusCode < 200 || response.StatusCode >= 300 {
-    return out, metadata, awsAwsjson11_deserializeOpErrorPutRegistryCatalogData(response, &metadata)
-  }
-  output := &PutRegistryCatalogDataOutput{}
-  out.Result = output
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(response.Body, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  err = awsAwsjson11_deserializeOpDocumentPutRegistryCatalogDataOutput(&output, shape)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorPutRegistryCatalogData(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-  var errorBuffer bytes.Buffer
-  if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-    return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-  }
-  errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-  errorCode := "UnknownError"
-  errorMessage := errorCode
-
-  headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  bodyInfo, err := getProtocolErrorInfo(decoder)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-    errorCode = restjson.SanitizeErrorCode(typ)
-  }
-  if len(bodyInfo.Message) != 0 {
-    errorMessage = bodyInfo.Message
-  }
-  switch {
-  case strings.EqualFold("InvalidParameterException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-  case strings.EqualFold("ServerException", errorCode):
-    return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-  case strings.EqualFold("UnsupportedCommandException", errorCode):
-    return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody)
-
-  default:
-    genericError := &smithy.GenericAPIError{
-      Code:    errorCode,
-      Message: errorMessage,
-    }
-    return genericError
-
-  }
-}
-
-type awsAwsjson11_deserializeOpPutRepositoryCatalogData struct {
-}
-
-func (*awsAwsjson11_deserializeOpPutRepositoryCatalogData) ID() string {
-  return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpPutRepositoryCatalogData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-  out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-  out, metadata, err = next.HandleDeserialize(ctx, in)
-  if err != nil {
-    return out, metadata, err
-  }
-
-  response, ok := out.RawResponse.(*smithyhttp.Response)
-  if !ok {
-    return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-  }
-
-  if response.StatusCode < 200 || response.StatusCode >= 300 {
-    return out, metadata, awsAwsjson11_deserializeOpErrorPutRepositoryCatalogData(response, &metadata)
-  }
-  output := &PutRepositoryCatalogDataOutput{}
-  out.Result = output
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(response.Body, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  err = awsAwsjson11_deserializeOpDocumentPutRepositoryCatalogDataOutput(&output, shape)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorPutRepositoryCatalogData(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-  var errorBuffer bytes.Buffer
-  if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-    return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-  }
-  errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-  errorCode := "UnknownError"
-  errorMessage := errorCode
-
-  headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  bodyInfo, err := getProtocolErrorInfo(decoder)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-    errorCode = restjson.SanitizeErrorCode(typ)
-  }
-  if len(bodyInfo.Message) != 0 {
-    errorMessage = bodyInfo.Message
-  }
-  switch {
-  case strings.EqualFold("InvalidParameterException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-  case strings.EqualFold("RepositoryNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-  case strings.EqualFold("ServerException", errorCode):
-    return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-  case strings.EqualFold("UnsupportedCommandException", errorCode):
-    return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody)
-
-  default:
-    genericError := &smithy.GenericAPIError{
-      Code:    errorCode,
-      Message: errorMessage,
-    }
-    return genericError
-
-  }
-}
-
-type awsAwsjson11_deserializeOpSetRepositoryPolicy struct {
-}
-
-func (*awsAwsjson11_deserializeOpSetRepositoryPolicy) ID() string {
-  return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpSetRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-  out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-  out, metadata, err = next.HandleDeserialize(ctx, in)
-  if err != nil {
-    return out, metadata, err
-  }
-
-  response, ok := out.RawResponse.(*smithyhttp.Response)
-  if !ok {
-    return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-  }
-
-  if response.StatusCode < 200 || response.StatusCode >= 300 {
-    return out, metadata, awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response, &metadata)
-  }
-  output := &SetRepositoryPolicyOutput{}
-  out.Result = output
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(response.Body, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  err = awsAwsjson11_deserializeOpDocumentSetRepositoryPolicyOutput(&output, shape)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-  var errorBuffer bytes.Buffer
-  if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-    return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-  }
-  errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-  errorCode := "UnknownError"
-  errorMessage := errorCode
-
-  headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  bodyInfo, err := getProtocolErrorInfo(decoder)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-    errorCode = restjson.SanitizeErrorCode(typ)
-  }
-  if len(bodyInfo.Message) != 0 {
-    errorMessage = bodyInfo.Message
-  }
-  switch {
-  case strings.EqualFold("InvalidParameterException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-  case strings.EqualFold("RepositoryNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-  case strings.EqualFold("ServerException", errorCode):
-    return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-  case strings.EqualFold("UnsupportedCommandException", errorCode):
-    return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody)
-
-  default:
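// Each handler above tees the response body through a small ring buffer while
// the JSON decoder consumes it, so that on a decode failure the last ~1KB of
// raw payload can be attached to the error as a diagnostic snapshot. The
// ringBuffer type below is a toy stand-in for smithy-go's smithyio.RingBuffer,
// written against the standard library only.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// ringBuffer keeps the most recent len(buf) bytes written through it.
type ringBuffer struct {
	buf []byte
	n   int // total bytes ever written
}

func (r *ringBuffer) Write(p []byte) (int, error) {
	for _, b := range p {
		r.buf[r.n%len(r.buf)] = b
		r.n++
	}
	return len(p), nil
}

// Bytes returns the retained tail of the stream in write order.
func (r *ringBuffer) Bytes() []byte {
	if r.n <= len(r.buf) {
		return r.buf[:r.n]
	}
	i := r.n % len(r.buf)
	return append(append([]byte{}, r.buf[i:]...), r.buf[:i]...)
}

func main() {
	rb := &ringBuffer{buf: make([]byte, 1024)}
	body := io.TeeReader(strings.NewReader(`{"broken":`), rb) // truncated JSON
	var shape interface{}
	if err := json.NewDecoder(body).Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, bytes.NewReader(rb.Bytes()))
		fmt.Printf("failed to decode response body, %v; snapshot: %q\n", err, snapshot.Bytes())
	}
}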
-    genericError := &smithy.GenericAPIError{
-      Code:    errorCode,
-      Message: errorMessage,
-    }
-    return genericError
-
-  }
-}
-
-type awsAwsjson11_deserializeOpTagResource struct {
-}
-
-func (*awsAwsjson11_deserializeOpTagResource) ID() string {
-  return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-  out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-  out, metadata, err = next.HandleDeserialize(ctx, in)
-  if err != nil {
-    return out, metadata, err
-  }
-
-  response, ok := out.RawResponse.(*smithyhttp.Response)
-  if !ok {
-    return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-  }
-
-  if response.StatusCode < 200 || response.StatusCode >= 300 {
-    return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata)
-  }
-  output := &TagResourceOutput{}
-  out.Result = output
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(response.Body, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  err = awsAwsjson11_deserializeOpDocumentTagResourceOutput(&output, shape)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-  var errorBuffer bytes.Buffer
-  if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-    return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-  }
-  errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-  errorCode := "UnknownError"
-  errorMessage := errorCode
-
-  headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  bodyInfo, err := getProtocolErrorInfo(decoder)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-    errorCode = restjson.SanitizeErrorCode(typ)
-  }
-  if len(bodyInfo.Message) != 0 {
-    errorMessage = bodyInfo.Message
-  }
-  switch {
-  case strings.EqualFold("InvalidParameterException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-  case strings.EqualFold("InvalidTagParameterException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody)
-
-  case strings.EqualFold("RepositoryNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-  case strings.EqualFold("ServerException", errorCode):
-    return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-  case strings.EqualFold("TooManyTagsException", errorCode):
-    return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody)
-
-  case strings.EqualFold("UnsupportedCommandException", errorCode):
-    return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody)
-
-  default:
-    genericError := &smithy.GenericAPIError{
-      Code:    errorCode,
-      Message: errorMessage,
-    }
-    return genericError
-
-  }
-}
-
-type awsAwsjson11_deserializeOpUntagResource struct {
-}
-
-func (*awsAwsjson11_deserializeOpUntagResource) ID() string {
-  return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-  out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-  out, metadata, err = next.HandleDeserialize(ctx, in)
-  if err != nil {
-    return out, metadata, err
-  }
-
-  response, ok := out.RawResponse.(*smithyhttp.Response)
-  if !ok {
-    return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-  }
-
-  if response.StatusCode < 200 || response.StatusCode >= 300 {
-    return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata)
-  }
-  output := &UntagResourceOutput{}
-  out.Result = output
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(response.Body, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  err = awsAwsjson11_deserializeOpDocumentUntagResourceOutput(&output, shape)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-  var errorBuffer bytes.Buffer
-  if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-    return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-  }
-  errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-  errorCode := "UnknownError"
-  errorMessage := errorCode
-
-  headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  bodyInfo, err := getProtocolErrorInfo(decoder)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-    errorCode = restjson.SanitizeErrorCode(typ)
-  }
-  if len(bodyInfo.Message) != 0 {
-    errorMessage = bodyInfo.Message
-  }
-  switch {
-  case strings.EqualFold("InvalidParameterException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-  case strings.EqualFold("InvalidTagParameterException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody)
-
-  case strings.EqualFold("RepositoryNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-  case strings.EqualFold("ServerException", errorCode):
-    return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-  case strings.EqualFold("TooManyTagsException", errorCode):
-    return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody)
-
-  case strings.EqualFold("UnsupportedCommandException", errorCode):
-    return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody)
-
-  default:
-    genericError := &smithy.GenericAPIError{
-      Code:    errorCode,
-      Message: errorMessage,
-    }
-    return genericError
-
-  }
-}
-
-type awsAwsjson11_deserializeOpUploadLayerPart struct {
-}
-
-func (*awsAwsjson11_deserializeOpUploadLayerPart) ID() string {
-  return "OperationDeserializer"
-}
-
-func (m *awsAwsjson11_deserializeOpUploadLayerPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
-  out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-  out, metadata, err = next.HandleDeserialize(ctx, in)
-  if err != nil {
-    return out, metadata, err
-  }
-
-  response, ok := out.RawResponse.(*smithyhttp.Response)
-  if !ok {
-    return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
-  }
-
-  if response.StatusCode < 200 || response.StatusCode >= 300 {
-    return out, metadata, awsAwsjson11_deserializeOpErrorUploadLayerPart(response, &metadata)
-  }
-  output := &UploadLayerPartOutput{}
-  out.Result = output
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(response.Body, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  err = awsAwsjson11_deserializeOpDocumentUploadLayerPartOutput(&output, shape)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return out, metadata, err
-  }
-
-  return out, metadata, err
-}
-
-func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Response, metadata *middleware.Metadata) error {
-  var errorBuffer bytes.Buffer
-  if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
-    return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
-  }
-  errorBody := bytes.NewReader(errorBuffer.Bytes())
-
-  errorCode := "UnknownError"
-  errorMessage := errorCode
-
-  headerCode := response.Header.Get("X-Amzn-ErrorType")
-
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  bodyInfo, err := getProtocolErrorInfo(decoder)
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
-    errorCode = restjson.SanitizeErrorCode(typ)
-  }
-  if len(bodyInfo.Message) != 0 {
-    errorMessage = bodyInfo.Message
-  }
-  switch {
-  case strings.EqualFold("InvalidLayerPartException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidLayerPartException(response, errorBody)
-
-  case strings.EqualFold("InvalidParameterException", errorCode):
-    return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody)
-
-  case strings.EqualFold("LimitExceededException", errorCode):
-    return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody)
-
-  case strings.EqualFold("RegistryNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRegistryNotFoundException(response, errorBody)
-
-  case strings.EqualFold("RepositoryNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody)
-
-  case strings.EqualFold("ServerException", errorCode):
-    return awsAwsjson11_deserializeErrorServerException(response, errorBody)
-
-  case strings.EqualFold("UnsupportedCommandException", errorCode):
-    return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody)
-
-  case strings.EqualFold("UploadNotFoundException", errorCode):
-    return awsAwsjson11_deserializeErrorUploadNotFoundException(response, errorBody)
-
-  default:
-    genericError := &smithy.GenericAPIError{
-      Code:    errorCode,
-      Message: errorMessage,
-    }
-    return genericError
-
-  }
-}
-
-func awsAwsjson11_deserializeErrorEmptyUploadException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.EmptyUploadException{}
-  err := awsAwsjson11_deserializeDocumentEmptyUploadException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorImageAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.ImageAlreadyExistsException{}
-  err := awsAwsjson11_deserializeDocumentImageAlreadyExistsException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorImageDigestDoesNotMatchException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.ImageDigestDoesNotMatchException{}
-  err := awsAwsjson11_deserializeDocumentImageDigestDoesNotMatchException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorImageNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.ImageNotFoundException{}
-  err := awsAwsjson11_deserializeDocumentImageNotFoundException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorImageTagAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.ImageTagAlreadyExistsException{}
-  err := awsAwsjson11_deserializeDocumentImageTagAlreadyExistsException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorInvalidLayerException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.InvalidLayerException{}
-  err := awsAwsjson11_deserializeDocumentInvalidLayerException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorInvalidLayerPartException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.InvalidLayerPartException{}
-  err := awsAwsjson11_deserializeDocumentInvalidLayerPartException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorInvalidParameterException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.InvalidParameterException{}
-  err := awsAwsjson11_deserializeDocumentInvalidParameterException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorInvalidTagParameterException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.InvalidTagParameterException{}
-  err := awsAwsjson11_deserializeDocumentInvalidTagParameterException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorLayerAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.LayerAlreadyExistsException{}
-  err := awsAwsjson11_deserializeDocumentLayerAlreadyExistsException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorLayerPartTooSmallException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.LayerPartTooSmallException{}
-  err := awsAwsjson11_deserializeDocumentLayerPartTooSmallException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorLayersNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.LayersNotFoundException{}
-  err := awsAwsjson11_deserializeDocumentLayersNotFoundException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.LimitExceededException{}
-  err := awsAwsjson11_deserializeDocumentLimitExceededException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorReferencedImagesNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.ReferencedImagesNotFoundException{}
-  err := awsAwsjson11_deserializeDocumentReferencedImagesNotFoundException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorRegistryNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.RegistryNotFoundException{}
-  err := awsAwsjson11_deserializeDocumentRegistryNotFoundException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorRepositoryAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.RepositoryAlreadyExistsException{}
-  err := awsAwsjson11_deserializeDocumentRepositoryAlreadyExistsException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorRepositoryCatalogDataNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.RepositoryCatalogDataNotFoundException{}
-  err := awsAwsjson11_deserializeDocumentRepositoryCatalogDataNotFoundException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorRepositoryNotEmptyException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.RepositoryNotEmptyException{}
-  err := awsAwsjson11_deserializeDocumentRepositoryNotEmptyException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorRepositoryNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.RepositoryNotFoundException{}
-  err := awsAwsjson11_deserializeDocumentRepositoryNotFoundException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.RepositoryPolicyNotFoundException{}
-  err := awsAwsjson11_deserializeDocumentRepositoryPolicyNotFoundException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.ServerException{}
-  err := awsAwsjson11_deserializeDocumentServerException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorTooManyTagsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  output := &types.TooManyTagsException{}
-  err := awsAwsjson11_deserializeDocumentTooManyTagsException(&output, shape)
-
-  if err != nil {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
-    err = &smithy.DeserializationError{
-      Err:      fmt.Errorf("failed to decode response body, %w", err),
-      Snapshot: snapshot.Bytes(),
-    }
-    return err
-  }
-
-  errorBody.Seek(0, io.SeekStart)
-  return output
-}
-
-func awsAwsjson11_deserializeErrorUnsupportedCommandException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
-  var buff [1024]byte
-  ringBuffer := smithyio.NewRingBuffer(buff[:])
-
-  body := io.TeeReader(errorBody, ringBuffer)
-  decoder := json.NewDecoder(body)
-  decoder.UseNumber()
-  var shape interface{}
-  if err := decoder.Decode(&shape); err != nil && err != io.EOF {
-    var snapshot bytes.Buffer
-    io.Copy(&snapshot, ringBuffer)
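// Why every helper above ends with errorBody.Seek(0, io.SeekStart): the error
// body is copied into memory exactly once, probed for its code and message,
// rewound, and then re-read by the exception-specific deserializer. A compact
// stdlib-only sketch of that two-pass read over a rewindable bytes.Reader:
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
)

func main() {
	raw := []byte(`{"__type":"ServerException","message":"boom"}`)
	errorBody := bytes.NewReader(raw)

	// First pass: peek at the error envelope to pick a deserializer.
	var probe struct {
		Type    string `json:"__type"`
		Message string `json:"message"`
	}
	if err := json.NewDecoder(errorBody).Decode(&probe); err != nil {
		panic(err)
	}

	// Rewind so the shape-specific second pass sees the full payload again.
	errorBody.Seek(0, io.SeekStart)
	var shape map[string]interface{}
	if err := json.NewDecoder(errorBody).Decode(&shape); err != nil {
		panic(err)
	}
	fmt.Println(probe.Type, shape["message"]) // ServerException boom
}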
- err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.UnsupportedCommandException{} - err := awsAwsjson11_deserializeDocumentUnsupportedCommandException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeErrorUploadNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.UploadNotFoundException{} - err := awsAwsjson11_deserializeDocumentUploadNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - return output -} - -func awsAwsjson11_deserializeDocumentArchitectureList(v *[]string, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []string - if *v == nil { - cv = []string{} - } else { - cv = *v - } - - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Architecture to be of type string, got %T instead", value) - } - col = jtv - } - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentAuthorizationData(v **types.AuthorizationData, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AuthorizationData - if *v == nil { - sv = &types.AuthorizationData{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "authorizationToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Base64 to be of type string, got %T instead", value) - } - sv.AuthorizationToken = ptr.String(jtv) - } - - case "expiresAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.ExpiresAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected ExpirationTimestamp to be a JSON Number, got %T instead", value) - - } - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentEmptyUploadException(v **types.EmptyUploadException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected 
nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.EmptyUploadException - if *v == nil { - sv = &types.EmptyUploadException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImage(v **types.Image, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Image - if *v == nil { - sv = &types.Image{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageId": - if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil { - return err - } - - case "imageManifest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageManifest to be of type string, got %T instead", value) - } - sv.ImageManifest = ptr.String(jtv) - } - - case "imageManifestMediaType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) - } - sv.ImageManifestMediaType = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryIdOrAlias to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageAlreadyExistsException(v **types.ImageAlreadyExistsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageAlreadyExistsException - if *v == nil { - sv = &types.ImageAlreadyExistsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageDetail(v **types.ImageDetail, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageDetail - if *v == nil { - sv = &types.ImageDetail{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "artifactMediaType": - if value != nil { - jtv, ok := value.(string) - if 
!ok { - return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) - } - sv.ArtifactMediaType = ptr.String(jtv) - } - - case "imageDigest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) - } - sv.ImageDigest = ptr.String(jtv) - } - - case "imageManifestMediaType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) - } - sv.ImageManifestMediaType = ptr.String(jtv) - } - - case "imagePushedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.ImagePushedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected PushTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "imageSizeInBytes": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ImageSizeInBytes to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ImageSizeInBytes = ptr.Int64(i64) - } - - case "imageTags": - if err := awsAwsjson11_deserializeDocumentImageTagList(&sv.ImageTags, value); err != nil { - return err - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageDetailList(v *[]types.ImageDetail, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.ImageDetail - if *v == nil { - cv = []types.ImageDetail{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.ImageDetail - destAddr := &col - if err := awsAwsjson11_deserializeDocumentImageDetail(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentImageDigestDoesNotMatchException(v **types.ImageDigestDoesNotMatchException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageDigestDoesNotMatchException - if *v == nil { - sv = &types.ImageDigestDoesNotMatchException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageFailure(v **types.ImageFailure, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } 
- if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageFailure - if *v == nil { - sv = &types.ImageFailure{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "failureCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageFailureCode to be of type string, got %T instead", value) - } - sv.FailureCode = types.ImageFailureCode(jtv) - } - - case "failureReason": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageFailureReason to be of type string, got %T instead", value) - } - sv.FailureReason = ptr.String(jtv) - } - - case "imageId": - if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageFailureList(v *[]types.ImageFailure, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.ImageFailure - if *v == nil { - cv = []types.ImageFailure{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.ImageFailure - destAddr := &col - if err := awsAwsjson11_deserializeDocumentImageFailure(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentImageIdentifier(v **types.ImageIdentifier, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageIdentifier - if *v == nil { - sv = &types.ImageIdentifier{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageDigest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) - } - sv.ImageDigest = ptr.String(jtv) - } - - case "imageTag": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageTag to be of type string, got %T instead", value) - } - sv.ImageTag = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageIdentifierList(v *[]types.ImageIdentifier, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.ImageIdentifier - if *v == nil { - cv = []types.ImageIdentifier{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.ImageIdentifier - destAddr := &col - if err := awsAwsjson11_deserializeDocumentImageIdentifier(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentImageNotFoundException(v **types.ImageNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == 
nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageNotFoundException - if *v == nil { - sv = &types.ImageNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageTagAlreadyExistsException(v **types.ImageTagAlreadyExistsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageTagAlreadyExistsException - if *v == nil { - sv = &types.ImageTagAlreadyExistsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageTagDetail(v **types.ImageTagDetail, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImageTagDetail - if *v == nil { - sv = &types.ImageTagDetail{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "createdAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "imageDetail": - if err := awsAwsjson11_deserializeDocumentReferencedImageDetail(&sv.ImageDetail, value); err != nil { - return err - } - - case "imageTag": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageTag to be of type string, got %T instead", value) - } - sv.ImageTag = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentImageTagDetailList(v *[]types.ImageTagDetail, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.ImageTagDetail - if *v == nil { - cv = []types.ImageTagDetail{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.ImageTagDetail - destAddr := &col - if err := awsAwsjson11_deserializeDocumentImageTagDetail(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentImageTagList(v *[]string, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - 
return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []string - if *v == nil { - cv = []string{} - } else { - cv = *v - } - - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageTag to be of type string, got %T instead", value) - } - col = jtv - } - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentInvalidLayerException(v **types.InvalidLayerException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidLayerException - if *v == nil { - sv = &types.InvalidLayerException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentInvalidLayerPartException(v **types.InvalidLayerPartException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidLayerPartException - if *v == nil { - sv = &types.InvalidLayerPartException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "lastValidByteReceived": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.LastValidByteReceived = ptr.Int64(i64) - } - - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - case "uploadId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) - } - sv.UploadId = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentInvalidParameterException(v **types.InvalidParameterException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidParameterException - if *v == nil { - sv = &types.InvalidParameterException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - 
case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentInvalidTagParameterException(v **types.InvalidTagParameterException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidTagParameterException - if *v == nil { - sv = &types.InvalidTagParameterException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLayer(v **types.Layer, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Layer - if *v == nil { - sv = &types.Layer{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "layerAvailability": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LayerAvailability to be of type string, got %T instead", value) - } - sv.LayerAvailability = types.LayerAvailability(jtv) - } - - case "layerDigest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LayerDigest to be of type string, got %T instead", value) - } - sv.LayerDigest = ptr.String(jtv) - } - - case "layerSize": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected LayerSizeInBytes to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.LayerSize = ptr.Int64(i64) - } - - case "mediaType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) - } - sv.MediaType = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLayerAlreadyExistsException(v **types.LayerAlreadyExistsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LayerAlreadyExistsException - if *v == nil { - sv = &types.LayerAlreadyExistsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLayerFailure(v **types.LayerFailure, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type 
%T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LayerFailure - if *v == nil { - sv = &types.LayerFailure{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "failureCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LayerFailureCode to be of type string, got %T instead", value) - } - sv.FailureCode = types.LayerFailureCode(jtv) - } - - case "failureReason": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LayerFailureReason to be of type string, got %T instead", value) - } - sv.FailureReason = ptr.String(jtv) - } - - case "layerDigest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected BatchedOperationLayerDigest to be of type string, got %T instead", value) - } - sv.LayerDigest = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLayerFailureList(v *[]types.LayerFailure, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.LayerFailure - if *v == nil { - cv = []types.LayerFailure{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.LayerFailure - destAddr := &col - if err := awsAwsjson11_deserializeDocumentLayerFailure(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentLayerList(v *[]types.Layer, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.Layer - if *v == nil { - cv = []types.Layer{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.Layer - destAddr := &col - if err := awsAwsjson11_deserializeDocumentLayer(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentLayerPartTooSmallException(v **types.LayerPartTooSmallException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LayerPartTooSmallException - if *v == nil { - sv = &types.LayerPartTooSmallException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLayersNotFoundException(v **types.LayersNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected 
JSON type %v", value) - } - - var sv *types.LayersNotFoundException - if *v == nil { - sv = &types.LayersNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentLimitExceededException(v **types.LimitExceededException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.LimitExceededException - if *v == nil { - sv = &types.LimitExceededException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentOperatingSystemList(v *[]string, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []string - if *v == nil { - cv = []string{} - } else { - cv = *v - } - - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected OperatingSystem to be of type string, got %T instead", value) - } - col = jtv - } - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentReferencedImageDetail(v **types.ReferencedImageDetail, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ReferencedImageDetail - if *v == nil { - sv = &types.ReferencedImageDetail{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "artifactMediaType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) - } - sv.ArtifactMediaType = ptr.String(jtv) - } - - case "imageDigest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) - } - sv.ImageDigest = ptr.String(jtv) - } - - case "imageManifestMediaType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) - } - sv.ImageManifestMediaType = ptr.String(jtv) - } - - case "imagePushedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.ImagePushedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected PushTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "imageSizeInBytes": - if value != nil { - jtv, ok := 
value.(json.Number) - if !ok { - return fmt.Errorf("expected ImageSizeInBytes to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ImageSizeInBytes = ptr.Int64(i64) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentReferencedImagesNotFoundException(v **types.ReferencedImagesNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ReferencedImagesNotFoundException - if *v == nil { - sv = &types.ReferencedImagesNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRegistry(v **types.Registry, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Registry - if *v == nil { - sv = &types.Registry{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "aliases": - if err := awsAwsjson11_deserializeDocumentRegistryAliasList(&sv.Aliases, value); err != nil { - return err - } - - case "registryArn": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Arn to be of type string, got %T instead", value) - } - sv.RegistryArn = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "registryUri": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) - } - sv.RegistryUri = ptr.String(jtv) - } - - case "verified": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected RegistryVerified to be of type *bool, got %T instead", value) - } - sv.Verified = ptr.Bool(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRegistryAlias(v **types.RegistryAlias, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RegistryAlias - if *v == nil { - sv = &types.RegistryAlias{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "defaultRegistryAlias": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected DefaultRegistryAliasFlag to be of type *bool, got %T instead", value) - } - sv.DefaultRegistryAlias = jtv - } - - case "name": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryAliasName to be of type string, got %T instead", value) - } - sv.Name = 
ptr.String(jtv) - } - - case "primaryRegistryAlias": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected PrimaryRegistryAliasFlag to be of type *bool, got %T instead", value) - } - sv.PrimaryRegistryAlias = jtv - } - - case "status": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryAliasStatus to be of type string, got %T instead", value) - } - sv.Status = types.RegistryAliasStatus(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRegistryAliasList(v *[]types.RegistryAlias, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.RegistryAlias - if *v == nil { - cv = []types.RegistryAlias{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.RegistryAlias - destAddr := &col - if err := awsAwsjson11_deserializeDocumentRegistryAlias(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentRegistryCatalogData(v **types.RegistryCatalogData, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RegistryCatalogData - if *v == nil { - sv = &types.RegistryCatalogData{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "displayName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryDisplayName to be of type string, got %T instead", value) - } - sv.DisplayName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRegistryList(v *[]types.Registry, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.Registry - if *v == nil { - cv = []types.Registry{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.Registry - destAddr := &col - if err := awsAwsjson11_deserializeDocumentRegistry(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentRegistryNotFoundException(v **types.RegistryNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RegistryNotFoundException - if *v == nil { - sv = &types.RegistryNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func 
awsAwsjson11_deserializeDocumentRepository(v **types.Repository, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Repository - if *v == nil { - sv = &types.Repository{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "createdAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value) - - } - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryArn": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Arn to be of type string, got %T instead", value) - } - sv.RepositoryArn = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - case "repositoryUri": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Url to be of type string, got %T instead", value) - } - sv.RepositoryUri = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryAlreadyExistsException(v **types.RepositoryAlreadyExistsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryAlreadyExistsException - if *v == nil { - sv = &types.RepositoryAlreadyExistsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryCatalogData(v **types.RepositoryCatalogData, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryCatalogData - if *v == nil { - sv = &types.RepositoryCatalogData{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "aboutText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AboutText to be of type string, got %T instead", value) - } - sv.AboutText = ptr.String(jtv) - } - - case "architectures": - if err := awsAwsjson11_deserializeDocumentArchitectureList(&sv.Architectures, value); err != nil { - return err - } - - case "description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected 
RepositoryDescription to be of type string, got %T instead", value) - } - sv.Description = ptr.String(jtv) - } - - case "logoUrl": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ResourceUrl to be of type string, got %T instead", value) - } - sv.LogoUrl = ptr.String(jtv) - } - - case "marketplaceCertified": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected MarketplaceCertified to be of type *bool, got %T instead", value) - } - sv.MarketplaceCertified = ptr.Bool(jtv) - } - - case "operatingSystems": - if err := awsAwsjson11_deserializeDocumentOperatingSystemList(&sv.OperatingSystems, value); err != nil { - return err - } - - case "usageText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UsageText to be of type string, got %T instead", value) - } - sv.UsageText = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryCatalogDataNotFoundException(v **types.RepositoryCatalogDataNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryCatalogDataNotFoundException - if *v == nil { - sv = &types.RepositoryCatalogDataNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryList(v *[]types.Repository, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.Repository - if *v == nil { - cv = []types.Repository{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.Repository - destAddr := &col - if err := awsAwsjson11_deserializeDocumentRepository(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryNotEmptyException(v **types.RepositoryNotEmptyException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryNotEmptyException - if *v == nil { - sv = &types.RepositoryNotEmptyException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryNotFoundException(v **types.RepositoryNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } 
- if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryNotFoundException - if *v == nil { - sv = &types.RepositoryNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentRepositoryPolicyNotFoundException(v **types.RepositoryPolicyNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RepositoryPolicyNotFoundException - if *v == nil { - sv = &types.RepositoryPolicyNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentServerException(v **types.ServerException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ServerException - if *v == nil { - sv = &types.ServerException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentTag(v **types.Tag, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.Tag - if *v == nil { - sv = &types.Tag{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "Key": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TagKey to be of type string, got %T instead", value) - } - sv.Key = ptr.String(jtv) - } - - case "Value": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) - } - sv.Value = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentTagList(v *[]types.Tag, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.Tag - if *v == nil { - cv = []types.Tag{} - } else { - cv = *v - } - - for _, value := 
range shape { - var col types.Tag - destAddr := &col - if err := awsAwsjson11_deserializeDocumentTag(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson11_deserializeDocumentTooManyTagsException(v **types.TooManyTagsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.TooManyTagsException - if *v == nil { - sv = &types.TooManyTagsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentUnsupportedCommandException(v **types.UnsupportedCommandException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnsupportedCommandException - if *v == nil { - sv = &types.UnsupportedCommandException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeDocumentUploadNotFoundException(v **types.UploadNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UploadNotFoundException - if *v == nil { - sv = &types.UploadNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentBatchCheckLayerAvailabilityOutput(v **BatchCheckLayerAvailabilityOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *BatchCheckLayerAvailabilityOutput - if *v == nil { - sv = &BatchCheckLayerAvailabilityOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "failures": - if err := awsAwsjson11_deserializeDocumentLayerFailureList(&sv.Failures, value); err != nil { - return err - } - - case "layers": - if err := awsAwsjson11_deserializeDocumentLayerList(&sv.Layers, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func 
awsAwsjson11_deserializeOpDocumentBatchDeleteImageOutput(v **BatchDeleteImageOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *BatchDeleteImageOutput - if *v == nil { - sv = &BatchDeleteImageOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "failures": - if err := awsAwsjson11_deserializeDocumentImageFailureList(&sv.Failures, value); err != nil { - return err - } - - case "imageIds": - if err := awsAwsjson11_deserializeDocumentImageIdentifierList(&sv.ImageIds, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentCompleteLayerUploadOutput(v **CompleteLayerUploadOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *CompleteLayerUploadOutput - if *v == nil { - sv = &CompleteLayerUploadOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "layerDigest": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LayerDigest to be of type string, got %T instead", value) - } - sv.LayerDigest = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - case "uploadId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) - } - sv.UploadId = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentCreateRepositoryOutput(v **CreateRepositoryOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *CreateRepositoryOutput - if *v == nil { - sv = &CreateRepositoryOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "catalogData": - if err := awsAwsjson11_deserializeDocumentRepositoryCatalogData(&sv.CatalogData, value); err != nil { - return err - } - - case "repository": - if err := awsAwsjson11_deserializeDocumentRepository(&sv.Repository, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDeleteRepositoryOutput(v **DeleteRepositoryOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DeleteRepositoryOutput - if *v == nil { - sv = 
&DeleteRepositoryOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "repository": - if err := awsAwsjson11_deserializeDocumentRepository(&sv.Repository, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDeleteRepositoryPolicyOutput(v **DeleteRepositoryPolicyOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DeleteRepositoryPolicyOutput - if *v == nil { - sv = &DeleteRepositoryPolicyOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "policyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) - } - sv.PolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDescribeImagesOutput(v **DescribeImagesOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DescribeImagesOutput - if *v == nil { - sv = &DescribeImagesOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageDetails": - if err := awsAwsjson11_deserializeDocumentImageDetailList(&sv.ImageDetails, value); err != nil { - return err - } - - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDescribeImageTagsOutput(v **DescribeImageTagsOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DescribeImageTagsOutput - if *v == nil { - sv = &DescribeImageTagsOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "imageTagDetails": - if err := awsAwsjson11_deserializeDocumentImageTagDetailList(&sv.ImageTagDetails, value); err != nil { - return err - } - - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDescribeRegistriesOutput(v **DescribeRegistriesOutput, value interface{}) error { - if v == nil { - return 
fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DescribeRegistriesOutput - if *v == nil { - sv = &DescribeRegistriesOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - case "registries": - if err := awsAwsjson11_deserializeDocumentRegistryList(&sv.Registries, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentDescribeRepositoriesOutput(v **DescribeRepositoriesOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *DescribeRepositoriesOutput - if *v == nil { - sv = &DescribeRepositoriesOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - case "repositories": - if err := awsAwsjson11_deserializeDocumentRepositoryList(&sv.Repositories, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentGetAuthorizationTokenOutput(v **GetAuthorizationTokenOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetAuthorizationTokenOutput - if *v == nil { - sv = &GetAuthorizationTokenOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "authorizationData": - if err := awsAwsjson11_deserializeDocumentAuthorizationData(&sv.AuthorizationData, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentGetRegistryCatalogDataOutput(v **GetRegistryCatalogDataOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetRegistryCatalogDataOutput - if *v == nil { - sv = &GetRegistryCatalogDataOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "registryCatalogData": - if err := awsAwsjson11_deserializeDocumentRegistryCatalogData(&sv.RegistryCatalogData, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentGetRepositoryCatalogDataOutput(v **GetRepositoryCatalogDataOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected 
JSON type %v", value) - } - - var sv *GetRepositoryCatalogDataOutput - if *v == nil { - sv = &GetRepositoryCatalogDataOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "catalogData": - if err := awsAwsjson11_deserializeDocumentRepositoryCatalogData(&sv.CatalogData, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(v **GetRepositoryPolicyOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetRepositoryPolicyOutput - if *v == nil { - sv = &GetRepositoryPolicyOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "policyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) - } - sv.PolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(v **InitiateLayerUploadOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *InitiateLayerUploadOutput - if *v == nil { - sv = &InitiateLayerUploadOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "partSize": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.PartSize = ptr.Int64(i64) - } - - case "uploadId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) - } - sv.UploadId = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *ListTagsForResourceOutput - if *v == nil { - sv = &ListTagsForResourceOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "tags": - if err := awsAwsjson11_deserializeDocumentTagList(&sv.Tags, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentPutImageOutput(v **PutImageOutput, value interface{}) error { - if v == nil { - return 
fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *PutImageOutput - if *v == nil { - sv = &PutImageOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "image": - if err := awsAwsjson11_deserializeDocumentImage(&sv.Image, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentPutRegistryCatalogDataOutput(v **PutRegistryCatalogDataOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *PutRegistryCatalogDataOutput - if *v == nil { - sv = &PutRegistryCatalogDataOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "registryCatalogData": - if err := awsAwsjson11_deserializeDocumentRegistryCatalogData(&sv.RegistryCatalogData, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentPutRepositoryCatalogDataOutput(v **PutRepositoryCatalogDataOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *PutRepositoryCatalogDataOutput - if *v == nil { - sv = &PutRepositoryCatalogDataOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "catalogData": - if err := awsAwsjson11_deserializeDocumentRepositoryCatalogData(&sv.CatalogData, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentSetRepositoryPolicyOutput(v **SetRepositoryPolicyOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *SetRepositoryPolicyOutput - if *v == nil { - sv = &SetRepositoryPolicyOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "policyText": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) - } - sv.PolicyText = ptr.String(jtv) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentTagResourceOutput(v **TagResourceOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok 
{ - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *TagResourceOutput - if *v == nil { - sv = &TagResourceOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentUntagResourceOutput(v **UntagResourceOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *UntagResourceOutput - if *v == nil { - sv = &UntagResourceOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson11_deserializeOpDocumentUploadLayerPartOutput(v **UploadLayerPartOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *UploadLayerPartOutput - if *v == nil { - sv = &UploadLayerPartOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "lastByteReceived": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.LastByteReceived = ptr.Int64(i64) - } - - case "registryId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) - } - sv.RegistryId = ptr.String(jtv) - } - - case "repositoryName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) - } - sv.RepositoryName = ptr.String(jtv) - } - - case "uploadId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) - } - sv.UploadId = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type protocolErrorInfo struct { - Type string `json:"__type"` - Message string - Code any // nonstandard for awsjson but some services do present the type here -} - -func getProtocolErrorInfo(decoder *json.Decoder) (protocolErrorInfo, error) { - var errInfo protocolErrorInfo - if err := decoder.Decode(&errInfo); err != nil { - if err == io.EOF { - return errInfo, nil - } - return errInfo, err - } - - return errInfo, nil -} - -func resolveProtocolErrorType(headerType string, bodyInfo protocolErrorInfo) (string, bool) { - if len(headerType) != 0 { - return headerType, true - } else if len(bodyInfo.Type) != 0 { - return bodyInfo.Type, true - } else if code, ok := bodyInfo.Code.(string); ok && len(code) != 0 { - return code, true - } - return "", false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/doc.go deleted file mode 100644 index 8706038fd3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -// Package ecrpublic provides the API client, operations, and parameter types for -// Amazon Elastic Container Registry Public. -// -// Amazon Elastic Container Registry Public Amazon Elastic Container Registry -// Public (Amazon ECR Public) is a managed container image registry service. Amazon -// ECR provides both public and private registries to host your container images. -// You can use the Docker CLI or your preferred client to push, pull, and manage -// images. Amazon ECR provides a secure, scalable, and reliable registry for your -// Docker or Open Container Initiative (OCI) images. Amazon ECR supports public -// repositories with this API. For information about the Amazon ECR API for private -// repositories, see Amazon Elastic Container Registry API Reference (https://docs.aws.amazon.com/AmazonECR/latest/APIReference/Welcome.html) -// . -package ecrpublic diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/endpoints.go deleted file mode 100644 index 95e0c365a3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/endpoints.go +++ /dev/null @@ -1,516 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom.You can provide functional options to configure endpoint -// values for the resolved endpoint. 
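// A minimal consumer-side sketch (not part of this diff) of the helper defined
// just below: pinning the client to a fixed endpoint. The URL and region are
// placeholders; Options.EndpointResolver is the deprecated v1 hook that this
// file's own comments steer toward Options.BaseEndpoint instead.
//
//	client := ecrpublic.New(ecrpublic.Options{
//		Region:           "us-east-1",
//		EndpointResolver: ecrpublic.EndpointResolverFromURL("https://vpce.example.internal"),
//	})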
-func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "ecr-public" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
-// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_ECR_PUBLIC") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "ECR PUBLIC", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. - // - // AWS::UseDualStack - UseDualStack *bool - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string -} - -// ValidateRequired validates required parameters are set. 
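// A consumer-side sketch (not part of this diff) of calling the V2 resolver in
// this file directly; the aws/context/log imports and the region value are
// assumptions. WithDefaults backfills UseDualStack/UseFIPS to false, so the
// ValidateRequired call defined next then passes.
//
//	params := ecrpublic.EndpointParameters{
//		Region: aws.String("us-east-1"),
//	}.WithDefaults()
//	if err := params.ValidateRequired(); err != nil {
//		log.Fatal(err)
//	}
//	ep, err := ecrpublic.NewDefaultEndpointResolverV2().
//		ResolveEndpoint(context.Background(), params)
//	if err != nil {
//		log.Fatal(err)
//	}
//	// With these defaults ep.URI is
//	// https://api.ecr-public.us-east-1.amazonaws.com.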
-func (p EndpointParameters) ValidateRequired() error { - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. -func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - return p -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseDualStack := *params.UseDualStack - _UseFIPS := *params.UseFIPS - - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") - } - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") - } - uriString := _Endpoint - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == true { - if _UseDualStack == true { - if true == _PartitionResult.SupportsFIPS { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://api.ecr-public-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") - } - } - if _UseFIPS == true { - if true == _PartitionResult.SupportsFIPS { - uriString := func() string { - var out strings.Builder - out.WriteString("https://api.ecr-public-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - 
return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") - } - if _UseDualStack == true { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://api.ecr-public.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://api.ecr-public.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = 
smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/generated.json deleted file mode 100644 index f11524a5fe..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/generated.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_BatchCheckLayerAvailability.go", - "api_op_BatchDeleteImage.go", - "api_op_CompleteLayerUpload.go", - "api_op_CreateRepository.go", - "api_op_DeleteRepository.go", - "api_op_DeleteRepositoryPolicy.go", - "api_op_DescribeImageTags.go", - "api_op_DescribeImages.go", - "api_op_DescribeRegistries.go", - "api_op_DescribeRepositories.go", - "api_op_GetAuthorizationToken.go", - "api_op_GetRegistryCatalogData.go", - "api_op_GetRepositoryCatalogData.go", - "api_op_GetRepositoryPolicy.go", - "api_op_InitiateLayerUpload.go", - "api_op_ListTagsForResource.go", - "api_op_PutImage.go", - "api_op_PutRegistryCatalogData.go", - "api_op_PutRepositoryCatalogData.go", - "api_op_SetRepositoryPolicy.go", - "api_op_TagResource.go", - "api_op_UntagResource.go", - "api_op_UploadLayerPart.go", - "auth.go", - "deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - "protocol_test.go", - "serializers.go", - "snapshot_test.go", - "types/enums.go", - "types/errors.go", - "types/types.go", - "validators.go" - ], - "go": "1.15", - "module": "github.com/aws/aws-sdk-go-v2/service/ecrpublic", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go deleted file mode 100644 index 6bb90d74a0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package ecrpublic - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.23.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go deleted file mode 100644 index 7f12229598..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go +++ /dev/null @@ -1,314 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
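// A quick sketch (not part of this diff) of what the partition regexes defined
// later in this file accept; the region strings are illustrative.
//
//	partitionRegexp.Aws.MatchString("eu-west-1")          // true  -> "aws"
//	partitionRegexp.AwsCn.MatchString("cn-north-1")       // true  -> "aws-cn"
//	partitionRegexp.AwsUsGov.MatchString("us-gov-west-1") // true  -> "aws-us-gov"
//	partitionRegexp.Aws.MatchString("us-gov-west-1")      // false: gov regions are their own partition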
- -package endpoints - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. - UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. - UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver ECR PUBLIC endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "api.ecr-public.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: 
"api.ecr-public-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "api.ecr-public-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr-public.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{ - Hostname: "api.ecr-public.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{ - Hostname: "api.ecr-public.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "api.ecr-public.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "api.ecr-public-fips.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "api.ecr-public-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr-public.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "api.ecr-public-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr-public.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "api.ecr-public-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr-public.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - }, - { - ID: "aws-iso-e", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "api.ecr-public-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr-public.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "api.ecr-public-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - 
SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr-public.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - }, - { - ID: "aws-us-gov", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "api.ecr-public.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "api.ecr-public-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "api.ecr-public-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "api.ecr-public.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/options.go deleted file mode 100644 index 2845b43a8d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/options.go +++ /dev/null @@ -1,217 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. 
To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. - AuthSchemes []smithyhttp.AuthScheme -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. 
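// A consumer-side sketch (not part of this diff) of the functional option
// defined just below, the usual hook for attaching middleware to every
// operation. cfg is assumed to come from config.LoadDefaultConfig, and
// middleware is github.com/aws/smithy-go/middleware.
//
//	client := ecrpublic.NewFromConfig(cfg, ecrpublic.WithAPIOptions(
//		func(stack *middleware.Stack) error {
//			// inspect or extend the per-operation middleware stack here
//			return nil
//		},
//	))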
-func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/serializers.go deleted file mode 100644 index 4fd9a7dc31..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/serializers.go +++ /dev/null @@ -1,1948 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
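// Every serializer in this file follows the same awsjson1.1 pattern: POST to
// "/", set Content-Type application/x-amz-json-1.1, and name the operation in
// an X-Amz-Target header ("SpencerFrontendService" is the service's internal
// target prefix). A hand-written approximation of the resulting request, not
// SDK output:
//
//	POST / HTTP/1.1
//	Content-Type: application/x-amz-json-1.1
//	X-Amz-Target: SpencerFrontendService.DescribeRepositories
//
//	{"maxResults":100}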
- -package ecrpublic - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - smithyjson "github.com/aws/smithy-go/encoding/json" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "path" -) - -type awsAwsjson11_serializeOpBatchCheckLayerAvailability struct { -} - -func (*awsAwsjson11_serializeOpBatchCheckLayerAvailability) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpBatchCheckLayerAvailability) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*BatchCheckLayerAvailabilityInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.BatchCheckLayerAvailability") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentBatchCheckLayerAvailabilityInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpBatchDeleteImage struct { -} - -func (*awsAwsjson11_serializeOpBatchDeleteImage) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpBatchDeleteImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*BatchDeleteImageInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) 
- if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.BatchDeleteImage") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentBatchDeleteImageInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpCompleteLayerUpload struct { -} - -func (*awsAwsjson11_serializeOpCompleteLayerUpload) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpCompleteLayerUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CompleteLayerUploadInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.CompleteLayerUpload") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentCompleteLayerUploadInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpCreateRepository struct { -} - -func (*awsAwsjson11_serializeOpCreateRepository) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpCreateRepository) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) 
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*CreateRepositoryInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.CreateRepository")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentCreateRepositoryInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpDeleteRepository struct {
-}
-
-func (*awsAwsjson11_serializeOpDeleteRepository) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpDeleteRepository) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*DeleteRepositoryInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.DeleteRepository")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentDeleteRepositoryInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpDeleteRepositoryPolicy struct {
-}
-
-func (*awsAwsjson11_serializeOpDeleteRepositoryPolicy) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpDeleteRepositoryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*DeleteRepositoryPolicyInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.DeleteRepositoryPolicy")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentDeleteRepositoryPolicyInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpDescribeImages struct {
-}
-
-func (*awsAwsjson11_serializeOpDescribeImages) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpDescribeImages) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*DescribeImagesInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.DescribeImages")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentDescribeImagesInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpDescribeImageTags struct {
-}
-
-func (*awsAwsjson11_serializeOpDescribeImageTags) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpDescribeImageTags) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*DescribeImageTagsInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.DescribeImageTags")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentDescribeImageTagsInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpDescribeRegistries struct {
-}
-
-func (*awsAwsjson11_serializeOpDescribeRegistries) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpDescribeRegistries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*DescribeRegistriesInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.DescribeRegistries")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentDescribeRegistriesInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpDescribeRepositories struct {
-}
-
-func (*awsAwsjson11_serializeOpDescribeRepositories) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpDescribeRepositories) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*DescribeRepositoriesInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.DescribeRepositories")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentDescribeRepositoriesInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpGetAuthorizationToken struct {
-}
-
-func (*awsAwsjson11_serializeOpGetAuthorizationToken) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpGetAuthorizationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*GetAuthorizationTokenInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.GetAuthorizationToken")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentGetAuthorizationTokenInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpGetRegistryCatalogData struct {
-}
-
-func (*awsAwsjson11_serializeOpGetRegistryCatalogData) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpGetRegistryCatalogData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*GetRegistryCatalogDataInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.GetRegistryCatalogData")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentGetRegistryCatalogDataInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpGetRepositoryCatalogData struct {
-}
-
-func (*awsAwsjson11_serializeOpGetRepositoryCatalogData) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpGetRepositoryCatalogData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*GetRepositoryCatalogDataInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.GetRepositoryCatalogData")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentGetRepositoryCatalogDataInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpGetRepositoryPolicy struct {
-}
-
-func (*awsAwsjson11_serializeOpGetRepositoryPolicy) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpGetRepositoryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*GetRepositoryPolicyInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.GetRepositoryPolicy")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentGetRepositoryPolicyInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpInitiateLayerUpload struct {
-}
-
-func (*awsAwsjson11_serializeOpInitiateLayerUpload) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpInitiateLayerUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*InitiateLayerUploadInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.InitiateLayerUpload")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentInitiateLayerUploadInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpListTagsForResource struct {
-}
-
-func (*awsAwsjson11_serializeOpListTagsForResource) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*ListTagsForResourceInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.ListTagsForResource")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentListTagsForResourceInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpPutImage struct {
-}
-
-func (*awsAwsjson11_serializeOpPutImage) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpPutImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*PutImageInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.PutImage")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentPutImageInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpPutRegistryCatalogData struct {
-}
-
-func (*awsAwsjson11_serializeOpPutRegistryCatalogData) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpPutRegistryCatalogData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*PutRegistryCatalogDataInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.PutRegistryCatalogData")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentPutRegistryCatalogDataInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpPutRepositoryCatalogData struct {
-}
-
-func (*awsAwsjson11_serializeOpPutRepositoryCatalogData) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpPutRepositoryCatalogData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutRepositoryCatalogDataInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.PutRepositoryCatalogData") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentPutRepositoryCatalogDataInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsjson11_serializeOpSetRepositoryPolicy struct { -} - -func (*awsAwsjson11_serializeOpSetRepositoryPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsjson11_serializeOpSetRepositoryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*SetRepositoryPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") - httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.SetRepositoryPolicy") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson11_serializeOpDocumentSetRepositoryPolicyInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = 
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpTagResource struct {
-}
-
-func (*awsAwsjson11_serializeOpTagResource) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*TagResourceInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.TagResource")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpUntagResource struct {
-}
-
-func (*awsAwsjson11_serializeOpUntagResource) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*UntagResourceInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.UntagResource")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-
-type awsAwsjson11_serializeOpUploadLayerPart struct {
-}
-
-func (*awsAwsjson11_serializeOpUploadLayerPart) ID() string {
-	return "OperationSerializer"
-}
-
-func (m *awsAwsjson11_serializeOpUploadLayerPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
-	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
-) {
-	request, ok := in.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
-	}
-
-	input, ok := in.Parameters.(*UploadLayerPartInput)
-	_ = input
-	if !ok {
-		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
-	}
-
-	operationPath := "/"
-	if len(request.Request.URL.Path) == 0 {
-		request.Request.URL.Path = operationPath
-	} else {
-		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
-		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
-			request.Request.URL.Path += "/"
-		}
-	}
-	request.Request.Method = "POST"
-	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
-	if err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1")
-	httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.UploadLayerPart")
-
-	jsonEncoder := smithyjson.NewEncoder()
-	if err := awsAwsjson11_serializeOpDocumentUploadLayerPartInput(input, jsonEncoder.Value); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-
-	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
-		return out, metadata, &smithy.SerializationError{Err: err}
-	}
-	in.Request = request
-
-	return next.HandleSerialize(ctx, in)
-}
-func awsAwsjson11_serializeDocumentArchitectureList(v []string, value smithyjson.Value) error {
-	array := value.Array()
-	defer array.Close()
-
-	for i := range v {
-		av := array.Value()
-		av.String(v[i])
-	}
-	return nil
-}
-
-func awsAwsjson11_serializeDocumentBatchedOperationLayerDigestList(v []string, value smithyjson.Value) error {
-	array := value.Array()
-	defer array.Close()
-
-	for i := range v {
-		av := array.Value()
-		av.String(v[i])
-	}
-	return nil
-}
-
-func awsAwsjson11_serializeDocumentImageIdentifier(v *types.ImageIdentifier, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.ImageDigest != nil {
-		ok := object.Key("imageDigest")
-		ok.String(*v.ImageDigest)
-	}
-
-	if v.ImageTag != nil {
-		ok := object.Key("imageTag")
-		ok.String(*v.ImageTag)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeDocumentImageIdentifierList(v []types.ImageIdentifier, value smithyjson.Value) error {
-	array := value.Array()
-	defer array.Close()
-
-	for i := range v {
-		av := array.Value()
-		if err := awsAwsjson11_serializeDocumentImageIdentifier(&v[i], av); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func awsAwsjson11_serializeDocumentLayerDigestList(v []string, value smithyjson.Value) error {
-	array := value.Array()
-	defer array.Close()
-
-	for i := range v {
-		av := array.Value()
-		av.String(v[i])
-	}
-	return nil
-}
-
-func awsAwsjson11_serializeDocumentOperatingSystemList(v []string, value smithyjson.Value) error {
-	array := value.Array()
-	defer array.Close()
-
-	for i := range v {
-		av := array.Value()
-		av.String(v[i])
-	}
-	return nil
-}
-
-func awsAwsjson11_serializeDocumentRepositoryCatalogDataInput(v *types.RepositoryCatalogDataInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.AboutText != nil {
-		ok := object.Key("aboutText")
-		ok.String(*v.AboutText)
-	}
-
-	if v.Architectures != nil {
-		ok := object.Key("architectures")
-		if err := awsAwsjson11_serializeDocumentArchitectureList(v.Architectures, ok); err != nil {
-			return err
-		}
-	}
-
-	if v.Description != nil {
-		ok := object.Key("description")
-		ok.String(*v.Description)
-	}
-
-	if v.LogoImageBlob != nil {
-		ok := object.Key("logoImageBlob")
-		ok.Base64EncodeBytes(v.LogoImageBlob)
-	}
-
-	if v.OperatingSystems != nil {
-		ok := object.Key("operatingSystems")
-		if err := awsAwsjson11_serializeDocumentOperatingSystemList(v.OperatingSystems, ok); err != nil {
-			return err
-		}
-	}
-
-	if v.UsageText != nil {
-		ok := object.Key("usageText")
-		ok.String(*v.UsageText)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeDocumentRepositoryNameList(v []string, value smithyjson.Value) error {
-	array := value.Array()
-	defer array.Close()
-
-	for i := range v {
-		av := array.Value()
-		av.String(v[i])
-	}
-	return nil
-}
-
-func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.Key != nil {
-		ok := object.Key("Key")
-		ok.String(*v.Key)
-	}
-
-	if v.Value != nil {
-		ok := object.Key("Value")
-		ok.String(*v.Value)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error {
-	array := value.Array()
-	defer array.Close()
-
-	for i := range v {
-		av := array.Value()
-		av.String(v[i])
-	}
-	return nil
-}
-
-func awsAwsjson11_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error {
-	array := value.Array()
-	defer array.Close()
-
-	for i := range v {
-		av := array.Value()
-		if err := awsAwsjson11_serializeDocumentTag(&v[i], av); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentBatchCheckLayerAvailabilityInput(v *BatchCheckLayerAvailabilityInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.LayerDigests != nil {
-		ok := object.Key("layerDigests")
-		if err := awsAwsjson11_serializeDocumentBatchedOperationLayerDigestList(v.LayerDigests, ok); err != nil {
-			return err
-		}
-	}
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentBatchDeleteImageInput(v *BatchDeleteImageInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.ImageIds != nil {
-		ok := object.Key("imageIds")
-		if err := awsAwsjson11_serializeDocumentImageIdentifierList(v.ImageIds, ok); err != nil {
-			return err
-		}
-	}
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentCompleteLayerUploadInput(v *CompleteLayerUploadInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.LayerDigests != nil {
-		ok := object.Key("layerDigests")
-		if err := awsAwsjson11_serializeDocumentLayerDigestList(v.LayerDigests, ok); err != nil {
-			return err
-		}
-	}
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	if v.UploadId != nil {
-		ok := object.Key("uploadId")
-		ok.String(*v.UploadId)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentCreateRepositoryInput(v *CreateRepositoryInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.CatalogData != nil {
-		ok := object.Key("catalogData")
-		if err := awsAwsjson11_serializeDocumentRepositoryCatalogDataInput(v.CatalogData, ok); err != nil {
-			return err
-		}
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	if v.Tags != nil {
-		ok := object.Key("tags")
-		if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentDeleteRepositoryInput(v *DeleteRepositoryInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.Force {
-		ok := object.Key("force")
-		ok.Boolean(v.Force)
-	}
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentDeleteRepositoryPolicyInput(v *DeleteRepositoryPolicyInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentDescribeImagesInput(v *DescribeImagesInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.ImageIds != nil {
-		ok := object.Key("imageIds")
-		if err := awsAwsjson11_serializeDocumentImageIdentifierList(v.ImageIds, ok); err != nil {
-			return err
-		}
-	}
-
-	if v.MaxResults != nil {
-		ok := object.Key("maxResults")
-		ok.Integer(*v.MaxResults)
-	}
-
-	if v.NextToken != nil {
-		ok := object.Key("nextToken")
-		ok.String(*v.NextToken)
-	}
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentDescribeImageTagsInput(v *DescribeImageTagsInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.MaxResults != nil {
-		ok := object.Key("maxResults")
-		ok.Integer(*v.MaxResults)
-	}
-
-	if v.NextToken != nil {
-		ok := object.Key("nextToken")
-		ok.String(*v.NextToken)
-	}
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentDescribeRegistriesInput(v *DescribeRegistriesInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.MaxResults != nil {
-		ok := object.Key("maxResults")
-		ok.Integer(*v.MaxResults)
-	}
-
-	if v.NextToken != nil {
-		ok := object.Key("nextToken")
-		ok.String(*v.NextToken)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentDescribeRepositoriesInput(v *DescribeRepositoriesInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.MaxResults != nil {
-		ok := object.Key("maxResults")
-		ok.Integer(*v.MaxResults)
-	}
-
-	if v.NextToken != nil {
-		ok := object.Key("nextToken")
-		ok.String(*v.NextToken)
-	}
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryNames != nil {
-		ok := object.Key("repositoryNames")
-		if err := awsAwsjson11_serializeDocumentRepositoryNameList(v.RepositoryNames, ok); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentGetAuthorizationTokenInput(v *GetAuthorizationTokenInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentGetRegistryCatalogDataInput(v *GetRegistryCatalogDataInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentGetRepositoryCatalogDataInput(v *GetRepositoryCatalogDataInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentGetRepositoryPolicyInput(v *GetRepositoryPolicyInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentInitiateLayerUploadInput(v *InitiateLayerUploadInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentListTagsForResourceInput(v *ListTagsForResourceInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.ResourceArn != nil {
-		ok := object.Key("resourceArn")
-		ok.String(*v.ResourceArn)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentPutImageInput(v *PutImageInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.ImageDigest != nil {
-		ok := object.Key("imageDigest")
-		ok.String(*v.ImageDigest)
-	}
-
-	if v.ImageManifest != nil {
-		ok := object.Key("imageManifest")
-		ok.String(*v.ImageManifest)
-	}
-
-	if v.ImageManifestMediaType != nil {
-		ok := object.Key("imageManifestMediaType")
-		ok.String(*v.ImageManifestMediaType)
-	}
-
-	if v.ImageTag != nil {
-		ok := object.Key("imageTag")
-		ok.String(*v.ImageTag)
-	}
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentPutRegistryCatalogDataInput(v *PutRegistryCatalogDataInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.DisplayName != nil {
-		ok := object.Key("displayName")
-		ok.String(*v.DisplayName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentPutRepositoryCatalogDataInput(v *PutRepositoryCatalogDataInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.CatalogData != nil {
-		ok := object.Key("catalogData")
-		if err := awsAwsjson11_serializeDocumentRepositoryCatalogDataInput(v.CatalogData, ok); err != nil {
-			return err
-		}
-	}
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentSetRepositoryPolicyInput(v *SetRepositoryPolicyInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.Force {
-		ok := object.Key("force")
-		ok.Boolean(v.Force)
-	}
-
-	if v.PolicyText != nil {
-		ok := object.Key("policyText")
-		ok.String(*v.PolicyText)
-	}
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
-		ok := object.Key("repositoryName")
-		ok.String(*v.RepositoryName)
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.ResourceArn != nil {
-		ok := object.Key("resourceArn")
-		ok.String(*v.ResourceArn)
-	}
-
-	if v.Tags != nil {
-		ok := object.Key("tags")
-		if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.ResourceArn != nil {
-		ok := object.Key("resourceArn")
-		ok.String(*v.ResourceArn)
-	}
-
-	if v.TagKeys != nil {
-		ok := object.Key("tagKeys")
-		if err := awsAwsjson11_serializeDocumentTagKeyList(v.TagKeys, ok); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func awsAwsjson11_serializeOpDocumentUploadLayerPartInput(v *UploadLayerPartInput, value smithyjson.Value) error {
-	object := value.Object()
-	defer object.Close()
-
-	if v.LayerPartBlob != nil {
-		ok := object.Key("layerPartBlob")
-		ok.Base64EncodeBytes(v.LayerPartBlob)
-	}
-
-	if v.PartFirstByte != nil {
-		ok := object.Key("partFirstByte")
-		ok.Long(*v.PartFirstByte)
-	}
-
-	if v.PartLastByte != nil {
-		ok := object.Key("partLastByte")
-		ok.Long(*v.PartLastByte)
-	}
-
-	if v.RegistryId != nil {
-		ok := object.Key("registryId")
-		ok.String(*v.RegistryId)
-	}
-
-	if v.RepositoryName != nil {
ok := object.Key("repositoryName") - ok.String(*v.RepositoryName) - } - - if v.UploadId != nil { - ok := object.Key("uploadId") - ok.String(*v.UploadId) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/enums.go deleted file mode 100644 index d80a052447..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/enums.go +++ /dev/null @@ -1,87 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -type ImageFailureCode string - -// Enum values for ImageFailureCode -const ( - ImageFailureCodeInvalidImageDigest ImageFailureCode = "InvalidImageDigest" - ImageFailureCodeInvalidImageTag ImageFailureCode = "InvalidImageTag" - ImageFailureCodeImageTagDoesNotMatchDigest ImageFailureCode = "ImageTagDoesNotMatchDigest" - ImageFailureCodeImageNotFound ImageFailureCode = "ImageNotFound" - ImageFailureCodeMissingDigestAndTag ImageFailureCode = "MissingDigestAndTag" - ImageFailureCodeImageReferencedByManifestList ImageFailureCode = "ImageReferencedByManifestList" - ImageFailureCodeKmsError ImageFailureCode = "KmsError" -) - -// Values returns all known values for ImageFailureCode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (ImageFailureCode) Values() []ImageFailureCode { - return []ImageFailureCode{ - "InvalidImageDigest", - "InvalidImageTag", - "ImageTagDoesNotMatchDigest", - "ImageNotFound", - "MissingDigestAndTag", - "ImageReferencedByManifestList", - "KmsError", - } -} - -type LayerAvailability string - -// Enum values for LayerAvailability -const ( - LayerAvailabilityAvailable LayerAvailability = "AVAILABLE" - LayerAvailabilityUnavailable LayerAvailability = "UNAVAILABLE" -) - -// Values returns all known values for LayerAvailability. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (LayerAvailability) Values() []LayerAvailability { - return []LayerAvailability{ - "AVAILABLE", - "UNAVAILABLE", - } -} - -type LayerFailureCode string - -// Enum values for LayerFailureCode -const ( - LayerFailureCodeInvalidLayerDigest LayerFailureCode = "InvalidLayerDigest" - LayerFailureCodeMissingLayerDigest LayerFailureCode = "MissingLayerDigest" -) - -// Values returns all known values for LayerFailureCode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (LayerFailureCode) Values() []LayerFailureCode { - return []LayerFailureCode{ - "InvalidLayerDigest", - "MissingLayerDigest", - } -} - -type RegistryAliasStatus string - -// Enum values for RegistryAliasStatus -const ( - RegistryAliasStatusActive RegistryAliasStatus = "ACTIVE" - RegistryAliasStatusPending RegistryAliasStatus = "PENDING" - RegistryAliasStatusRejected RegistryAliasStatus = "REJECTED" -) - -// Values returns all known values for RegistryAliasStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. 
-func (RegistryAliasStatus) Values() []RegistryAliasStatus { - return []RegistryAliasStatus{ - "ACTIVE", - "PENDING", - "REJECTED", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/errors.go deleted file mode 100644 index 24f4d43c76..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/errors.go +++ /dev/null @@ -1,655 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// The specified layer upload doesn't contain any layer parts. -type EmptyUploadException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *EmptyUploadException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *EmptyUploadException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *EmptyUploadException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "EmptyUploadException" - } - return *e.ErrorCodeOverride -} -func (e *EmptyUploadException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified image has already been pushed, and there were no changes to the -// manifest or image tag after the last push. -type ImageAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ImageAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ImageAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ImageAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ImageAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *ImageAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified image digest doesn't match the digest that Amazon ECR calculated -// for the image. -type ImageDigestDoesNotMatchException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ImageDigestDoesNotMatchException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ImageDigestDoesNotMatchException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ImageDigestDoesNotMatchException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ImageDigestDoesNotMatchException" - } - return *e.ErrorCodeOverride -} -func (e *ImageDigestDoesNotMatchException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The image requested doesn't exist in the specified repository. 
-type ImageNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ImageNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ImageNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ImageNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ImageNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *ImageNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified image is tagged with a tag that already exists. The repository is -// configured for tag immutability. -type ImageTagAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ImageTagAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ImageTagAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ImageTagAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ImageTagAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *ImageTagAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The layer digest calculation performed by Amazon ECR when the image layer -// doesn't match the digest specified. -type InvalidLayerException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidLayerException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidLayerException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidLayerException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidLayerException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidLayerException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The layer part size isn't valid, or the first byte specified isn't consecutive -// to the last byte of a previous layer part upload. -type InvalidLayerPartException struct { - Message *string - - ErrorCodeOverride *string - - RegistryId *string - RepositoryName *string - UploadId *string - LastValidByteReceived *int64 - - noSmithyDocumentSerde -} - -func (e *InvalidLayerPartException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidLayerPartException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidLayerPartException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidLayerPartException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidLayerPartException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified parameter is invalid. Review the available parameters for the API -// request. 
-type InvalidParameterException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidParameterException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidParameterException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidParameterException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidParameterException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// An invalid parameter has been specified. Tag keys can have a maximum character -// length of 128 characters, and tag values can have a maximum length of 256 -// characters. -type InvalidTagParameterException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidTagParameterException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidTagParameterException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidTagParameterException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidTagParameterException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidTagParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The image layer already exists in the associated repository. -type LayerAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LayerAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LayerAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LayerAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LayerAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *LayerAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Layer parts must be at least 5 MiB in size. -type LayerPartTooSmallException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LayerPartTooSmallException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LayerPartTooSmallException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LayerPartTooSmallException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LayerPartTooSmallException" - } - return *e.ErrorCodeOverride -} -func (e *LayerPartTooSmallException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified layers can't be found, or the specified layer isn't valid for -// this repository. 
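Every exception type in this deleted errors.go carries the same trio of methods (ErrorCode, ErrorMessage, ErrorFault), which is exactly smithy-go's APIError interface. A short sketch of how calling code can branch on these errors generically; classify is a hypothetical helper, not an API from this diff.

package sketch

import (
	"errors"
	"fmt"

	smithy "github.com/aws/smithy-go"
)

// classify unwraps any modeled service error and reports its code, message,
// and whether the fault is attributed to the client, using only the methods
// each deleted exception type implements.
func classify(err error) string {
	var ae smithy.APIError
	if errors.As(err, &ae) {
		return fmt.Sprintf("%s (client fault: %v): %s",
			ae.ErrorCode(), ae.ErrorFault() == smithy.FaultClient, ae.ErrorMessage())
	}
	return err.Error()
}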
-type LayersNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LayersNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LayersNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LayersNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LayersNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *LayersNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The operation didn't succeed because it would have exceeded a service limit for -// your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) -// in the Amazon Elastic Container Registry User Guide. -type LimitExceededException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LimitExceededException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LimitExceededException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LimitExceededException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LimitExceededException" - } - return *e.ErrorCodeOverride -} -func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The manifest list is referencing an image that doesn't exist. -type ReferencedImagesNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ReferencedImagesNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ReferencedImagesNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ReferencedImagesNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ReferencedImagesNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *ReferencedImagesNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The registry doesn't exist. -type RegistryNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RegistryNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RegistryNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RegistryNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RegistryNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *RegistryNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified repository already exists in the specified registry. 
-type RepositoryAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The repository catalog data doesn't exist. -type RepositoryCatalogDataNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryCatalogDataNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryCatalogDataNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryCatalogDataNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryCatalogDataNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryCatalogDataNotFoundException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The specified repository contains images. To delete a repository that contains -// images, you must force the deletion with the force parameter. -type RepositoryNotEmptyException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryNotEmptyException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryNotEmptyException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryNotEmptyException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryNotEmptyException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryNotEmptyException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified repository can't be found. Check the spelling of the specified -// repository and ensure that you're performing operations on the correct registry. -type RepositoryNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified repository and registry combination doesn't have an associated -// repository policy. 
-type RepositoryPolicyNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryPolicyNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryPolicyNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryPolicyNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryPolicyNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// These errors are usually caused by a server-side issue. -type ServerException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ServerException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ServerException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ServerException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ServerException" - } - return *e.ErrorCodeOverride -} -func (e *ServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } - -// The list of tags on the repository is over the limit. The maximum number of -// tags that can be applied to a repository is 50. -type TooManyTagsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *TooManyTagsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *TooManyTagsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *TooManyTagsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "TooManyTagsException" - } - return *e.ErrorCodeOverride -} -func (e *TooManyTagsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The action isn't supported in this Region. -type UnsupportedCommandException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UnsupportedCommandException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnsupportedCommandException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnsupportedCommandException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnsupportedCommandException" - } - return *e.ErrorCodeOverride -} -func (e *UnsupportedCommandException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The upload can't be found, or the specified upload ID isn't valid for this -// repository. 
-type UploadNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UploadNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UploadNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UploadNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UploadNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *UploadNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/types.go deleted file mode 100644 index b24a2f4e4b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/types.go +++ /dev/null @@ -1,396 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" - "time" -) - -// An authorization token data object that corresponds to a public registry. -type AuthorizationData struct { - - // A base64-encoded string that contains authorization data for a public Amazon - // ECR registry. When the string is decoded, it's presented in the format - // user:password for public registry authentication using docker login . - AuthorizationToken *string - - // The Unix time in seconds and milliseconds when the authorization token expires. - // Authorization tokens are valid for 12 hours. - ExpiresAt *time.Time - - noSmithyDocumentSerde -} - -// An object that represents an Amazon ECR image. -type Image struct { - - // An object that contains the image tag and image digest associated with an image. - ImageId *ImageIdentifier - - // The image manifest that's associated with the image. - ImageManifest *string - - // The manifest media type of the image. - ImageManifestMediaType *string - - // The Amazon Web Services account ID that's associated with the registry - // containing the image. - RegistryId *string - - // The name of the repository that's associated with the image. - RepositoryName *string - - noSmithyDocumentSerde -} - -// An object that describes an image that's returned by a DescribeImages operation. -type ImageDetail struct { - - // The artifact media type of the image. - ArtifactMediaType *string - - // The sha256 digest of the image manifest. - ImageDigest *string - - // The media type of the image manifest. - ImageManifestMediaType *string - - // The date and time, expressed in standard JavaScript date format, that the - // current image was pushed to the repository at. - ImagePushedAt *time.Time - - // The size, in bytes, of the image in the repository. If the image is a manifest - // list, this is the max size of all manifests in the list. Beginning with Docker - // version 1.9, the Docker client compresses image layers before pushing them to a - // V2 Docker registry. The output of the docker images command shows the - // uncompressed image size, so it might return a larger image size than the image - // sizes that are returned by DescribeImages . - ImageSizeInBytes *int64 - - // The list of tags that's associated with this image. - ImageTags []string - - // The Amazon Web Services account ID that's associated with the public registry - // where this image belongs. - RegistryId *string - - // The name of the repository where this image belongs. 
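The AuthorizationData doc comment in the types.go deletion above pins down the token format: a base64 string that decodes to user:password, valid for 12 hours, and consumed by docker login. A self-contained sketch of that decode step; the example token is fabricated for illustration.

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decodeAuthToken splits a decoded ECR Public authorization token into the
// username/password pair that docker login expects.
func decodeAuthToken(token string) (user, pass string, err error) {
	raw, err := base64.StdEncoding.DecodeString(token)
	if err != nil {
		return "", "", err
	}
	parts := strings.SplitN(string(raw), ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("token did not decode to user:password")
	}
	return parts[0], parts[1], nil
}

func main() {
	// "QVdTOmV4YW1wbGU=" is base64 for "AWS:example", a made-up token.
	user, pass, err := decodeAuthToken("QVdTOmV4YW1wbGU=")
	fmt.Println(user, pass, err)
}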
- RepositoryName *string - - noSmithyDocumentSerde -} - -// An object that represents an Amazon ECR image failure. -type ImageFailure struct { - - // The code that's associated with the failure. - FailureCode ImageFailureCode - - // The reason for the failure. - FailureReason *string - - // The image ID that's associated with the failure. - ImageId *ImageIdentifier - - noSmithyDocumentSerde -} - -// An object with identifying information for an Amazon ECR image. -type ImageIdentifier struct { - - // The sha256 digest of the image manifest. - ImageDigest *string - - // The tag that's used for the image. - ImageTag *string - - noSmithyDocumentSerde -} - -// An object that represents the image tag details for an image. -type ImageTagDetail struct { - - // The time stamp that indicates when the image tag was created. - CreatedAt *time.Time - - // An object that describes the details of an image. - ImageDetail *ReferencedImageDetail - - // The tag that's associated with the image. - ImageTag *string - - noSmithyDocumentSerde -} - -// An object that represents an Amazon ECR image layer. -type Layer struct { - - // The availability status of the image layer. - LayerAvailability LayerAvailability - - // The sha256 digest of the image layer. - LayerDigest *string - - // The size, in bytes, of the image layer. - LayerSize *int64 - - // The media type of the layer, such as - // application/vnd.docker.image.rootfs.diff.tar.gzip or - // application/vnd.oci.image.layer.v1.tar+gzip . - MediaType *string - - noSmithyDocumentSerde -} - -// An object that represents an Amazon ECR image layer failure. -type LayerFailure struct { - - // The failure code that's associated with the failure. - FailureCode LayerFailureCode - - // The reason for the failure. - FailureReason *string - - // The layer digest that's associated with the failure. - LayerDigest *string - - noSmithyDocumentSerde -} - -// An object that describes the image tag details that are returned by a -// DescribeImageTags action. -type ReferencedImageDetail struct { - - // The artifact media type of the image. - ArtifactMediaType *string - - // The sha256 digest of the image manifest. - ImageDigest *string - - // The media type of the image manifest. - ImageManifestMediaType *string - - // The date and time, expressed in standard JavaScript date format, which the - // current image tag was pushed to the repository at. - ImagePushedAt *time.Time - - // The size, in bytes, of the image in the repository. If the image is a manifest - // list, this is the max size of all manifests in the list. Beginning with Docker - // version 1.9, the Docker client compresses image layers before pushing them to a - // V2 Docker registry. The output of the docker images command shows the - // uncompressed image size, so it might return a larger image size than the image - // sizes that are returned by DescribeImages . - ImageSizeInBytes *int64 - - noSmithyDocumentSerde -} - -// The details of a public registry. -type Registry struct { - - // An array of objects that represents the aliases for a public registry. - // - // This member is required. - Aliases []RegistryAlias - - // The Amazon Resource Name (ARN) of the public registry. - // - // This member is required. - RegistryArn *string - - // The Amazon Web Services account ID that's associated with the registry. If you - // do not specify a registry, the default public registry is assumed. - // - // This member is required. - RegistryId *string - - // The URI of a public registry. 
The URI contains a universal prefix and the - // registry alias. - // - // This member is required. - RegistryUri *string - - // Indicates whether the account is a verified Amazon Web Services Marketplace - // vendor. If an account is verified, each public repository receives a verified - // account badge on the Amazon ECR Public Gallery. - // - // This member is required. - Verified *bool - - noSmithyDocumentSerde -} - -// An object representing the aliases for a public registry. A public registry is -// given an alias when it's created. However, a custom alias can be set using the -// Amazon ECR console. For more information, see Registries (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html) -// in the Amazon Elastic Container Registry User Guide. -type RegistryAlias struct { - - // Indicates whether the registry alias is the default alias for the registry. - // When the first public repository is created, your public registry is assigned a - // default registry alias. - // - // This member is required. - DefaultRegistryAlias bool - - // The name of the registry alias. - // - // This member is required. - Name *string - - // Indicates whether the registry alias is the primary alias for the registry. If - // true, the alias is the primary registry alias and is displayed in both the - // repository URL and the image URI used in the docker pull commands on the Amazon - // ECR Public Gallery. A registry alias that isn't the primary registry alias can - // be used in the repository URI in a docker pull command. - // - // This member is required. - PrimaryRegistryAlias bool - - // The status of the registry alias. - // - // This member is required. - Status RegistryAliasStatus - - noSmithyDocumentSerde -} - -// The metadata for a public registry. -type RegistryCatalogData struct { - - // The display name for a public registry. This appears on the Amazon ECR Public - // Gallery. Only accounts that have the verified account badge can have a registry - // display name. - DisplayName *string - - noSmithyDocumentSerde -} - -// An object representing a repository. -type Repository struct { - - // The date and time, in JavaScript date format, when the repository was created. - CreatedAt *time.Time - - // The Amazon Web Services account ID that's associated with the public registry - // that contains the repository. - RegistryId *string - - // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains - // the arn:aws:ecr namespace, followed by the region of the repository, Amazon Web - // Services account ID of the repository owner, repository namespace, and - // repository name. For example, arn:aws:ecr:region:012345678910:repository/test . - RepositoryArn *string - - // The name of the repository. - RepositoryName *string - - // The URI for the repository. You can use this URI for container image push and - // pull operations. - RepositoryUri *string - - noSmithyDocumentSerde -} - -// The catalog data for a repository. This data is publicly visible in the Amazon -// ECR Public Gallery. -type RepositoryCatalogData struct { - - // The longform description of the contents of the repository. This text appears - // in the repository details on the Amazon ECR Public Gallery. - AboutText *string - - // The architecture tags that are associated with the repository. Only supported - // operating system tags appear publicly in the Amazon ECR Public Gallery. For more - // information, see RepositoryCatalogDataInput . 
- Architectures []string - - // The short description of the repository. - Description *string - - // The URL that contains the logo that's associated with the repository. - LogoUrl *string - - // Indicates whether the repository is certified by Amazon Web Services - // Marketplace. - MarketplaceCertified *bool - - // The operating system tags that are associated with the repository. Only - // supported operating system tags appear publicly in the Amazon ECR Public - // Gallery. For more information, see RepositoryCatalogDataInput . - OperatingSystems []string - - // The longform usage details of the contents of the repository. The usage text - // provides context for users of the repository. - UsageText *string - - noSmithyDocumentSerde -} - -// An object that contains the catalog data for a repository. This data is -// publicly visible in the Amazon ECR Public Gallery. -type RepositoryCatalogDataInput struct { - - // A detailed description of the contents of the repository. It's publicly visible - // in the Amazon ECR Public Gallery. The text must be in markdown format. - AboutText *string - - // The system architecture that the images in the repository are compatible with. - // On the Amazon ECR Public Gallery, the following supported architectures appear - // as badges on the repository and are used as search filters. If an unsupported - // tag is added to your repository catalog data, it's associated with the - // repository and can be retrieved using the API but isn't discoverable in the - // Amazon ECR Public Gallery. - // - ARM - // - ARM 64 - // - x86 - // - x86-64 - Architectures []string - - // A short description of the contents of the repository. This text appears in - // both the image details and also when searching for repositories on the Amazon - // ECR Public Gallery. - Description *string - - // The base64-encoded repository logo payload. The repository logo is only - // publicly visible in the Amazon ECR Public Gallery for verified accounts. - LogoImageBlob []byte - - // The operating systems that the images in the repository are compatible with. On - // the Amazon ECR Public Gallery, the following supported operating systems appear - // as badges on the repository and are used as search filters. If an unsupported - // tag is added to your repository catalog data, it's associated with the - // repository and can be retrieved using the API but isn't discoverable in the - // Amazon ECR Public Gallery. - // - Linux - // - Windows - OperatingSystems []string - - // Detailed information about how to use the contents of the repository. It's - // publicly visible in the Amazon ECR Public Gallery. The usage text provides - // context, support information, and additional usage details for users of the - // repository. The text must be in markdown format. - UsageText *string - - noSmithyDocumentSerde -} - -// The metadata that you apply to a resource to help you categorize and organize -// them. Each tag consists of a key and an optional value. You define both. Tag -// keys can have a maximum character length of 128 characters, and tag values can -// have a maximum length of 256 characters. -type Tag struct { - - // One part of a key-value pair that make up a tag. A key is a general label that - // acts like a category for more specific tag values. - Key *string - - // The optional part of a key-value pair that make up a tag. A value acts as a - // descriptor within a tag category (key). 
- Value *string - - noSmithyDocumentSerde -} - -type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/validators.go deleted file mode 100644 index 76d60ad712..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/validators.go +++ /dev/null @@ -1,751 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ecrpublic - -import ( - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpBatchCheckLayerAvailability struct { -} - -func (*validateOpBatchCheckLayerAvailability) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpBatchCheckLayerAvailability) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*BatchCheckLayerAvailabilityInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpBatchCheckLayerAvailabilityInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpBatchDeleteImage struct { -} - -func (*validateOpBatchDeleteImage) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpBatchDeleteImage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*BatchDeleteImageInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpBatchDeleteImageInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCompleteLayerUpload struct { -} - -func (*validateOpCompleteLayerUpload) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCompleteLayerUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CompleteLayerUploadInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCompleteLayerUploadInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateRepository struct { -} - -func (*validateOpCreateRepository) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateRepository) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateRepositoryInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateRepositoryInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteRepository struct { -} - -func (*validateOpDeleteRepository) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteRepository) HandleInitialize(ctx 
context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteRepositoryInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteRepositoryInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteRepositoryPolicy struct { -} - -func (*validateOpDeleteRepositoryPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteRepositoryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteRepositoryPolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteRepositoryPolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDescribeImages struct { -} - -func (*validateOpDescribeImages) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDescribeImages) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DescribeImagesInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDescribeImagesInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDescribeImageTags struct { -} - -func (*validateOpDescribeImageTags) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDescribeImageTags) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DescribeImageTagsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDescribeImageTagsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetRepositoryCatalogData struct { -} - -func (*validateOpGetRepositoryCatalogData) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetRepositoryCatalogData) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetRepositoryCatalogDataInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetRepositoryCatalogDataInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetRepositoryPolicy struct { -} - -func (*validateOpGetRepositoryPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetRepositoryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := 
in.Parameters.(*GetRepositoryPolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetRepositoryPolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpInitiateLayerUpload struct { -} - -func (*validateOpInitiateLayerUpload) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpInitiateLayerUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*InitiateLayerUploadInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpInitiateLayerUploadInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListTagsForResource struct { -} - -func (*validateOpListTagsForResource) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListTagsForResourceInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListTagsForResourceInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutImage struct { -} - -func (*validateOpPutImage) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutImage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutImageInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutImageInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutRepositoryCatalogData struct { -} - -func (*validateOpPutRepositoryCatalogData) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutRepositoryCatalogData) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutRepositoryCatalogDataInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutRepositoryCatalogDataInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpSetRepositoryPolicy struct { -} - -func (*validateOpSetRepositoryPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpSetRepositoryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*SetRepositoryPolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpSetRepositoryPolicyInput(input); err != nil 
{ - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpTagResource struct { -} - -func (*validateOpTagResource) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*TagResourceInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpTagResourceInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpUntagResource struct { -} - -func (*validateOpUntagResource) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*UntagResourceInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpUntagResourceInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpUploadLayerPart struct { -} - -func (*validateOpUploadLayerPart) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpUploadLayerPart) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*UploadLayerPartInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpUploadLayerPartInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func addOpBatchCheckLayerAvailabilityValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpBatchCheckLayerAvailability{}, middleware.After) -} - -func addOpBatchDeleteImageValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpBatchDeleteImage{}, middleware.After) -} - -func addOpCompleteLayerUploadValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCompleteLayerUpload{}, middleware.After) -} - -func addOpCreateRepositoryValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateRepository{}, middleware.After) -} - -func addOpDeleteRepositoryValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteRepository{}, middleware.After) -} - -func addOpDeleteRepositoryPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteRepositoryPolicy{}, middleware.After) -} - -func addOpDescribeImagesValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDescribeImages{}, middleware.After) -} - -func addOpDescribeImageTagsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDescribeImageTags{}, middleware.After) -} - -func addOpGetRepositoryCatalogDataValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetRepositoryCatalogData{}, middleware.After) -} - 
-func addOpGetRepositoryPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetRepositoryPolicy{}, middleware.After) -} - -func addOpInitiateLayerUploadValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpInitiateLayerUpload{}, middleware.After) -} - -func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After) -} - -func addOpPutImageValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutImage{}, middleware.After) -} - -func addOpPutRepositoryCatalogDataValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutRepositoryCatalogData{}, middleware.After) -} - -func addOpSetRepositoryPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpSetRepositoryPolicy{}, middleware.After) -} - -func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) -} - -func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) -} - -func addOpUploadLayerPartValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpUploadLayerPart{}, middleware.After) -} - -func validateOpBatchCheckLayerAvailabilityInput(v *BatchCheckLayerAvailabilityInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "BatchCheckLayerAvailabilityInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.LayerDigests == nil { - invalidParams.Add(smithy.NewErrParamRequired("LayerDigests")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpBatchDeleteImageInput(v *BatchDeleteImageInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "BatchDeleteImageInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.ImageIds == nil { - invalidParams.Add(smithy.NewErrParamRequired("ImageIds")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCompleteLayerUploadInput(v *CompleteLayerUploadInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CompleteLayerUploadInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if v.LayerDigests == nil { - invalidParams.Add(smithy.NewErrParamRequired("LayerDigests")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateRepositoryInput(v *CreateRepositoryInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateRepositoryInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteRepositoryInput(v *DeleteRepositoryInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: 
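The validateOp* middlewares in this deleted validators.go all share one shape: assert the concrete input type, run a nil-safe required-field check built on smithy.InvalidParamsError, and only then hand off to the next Initialize handler; the addOp* helpers register each middleware after the Initialize step. A condensed sketch of the same pattern, with fakeInput and validateOpFake as hypothetical names.

package sketch

import (
	"context"
	"fmt"

	smithy "github.com/aws/smithy-go"
	"github.com/aws/smithy-go/middleware"
)

// fakeInput stands in for a generated *XxxInput type.
type fakeInput struct{ RepositoryName *string }

type validateOpFake struct{}

func (*validateOpFake) ID() string { return "OperationInputValidation" }

// HandleInitialize mirrors the deleted middlewares: type-assert, validate,
// then continue the stack.
func (m *validateOpFake) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
	input, ok := in.Parameters.(*fakeInput)
	if !ok {
		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
	}
	if err := validateFakeInput(input); err != nil {
		return out, metadata, err
	}
	return next.HandleInitialize(ctx, in)
}

// validateFakeInput applies the generated required-field idiom.
func validateFakeInput(v *fakeInput) error {
	if v == nil {
		return nil
	}
	invalidParams := smithy.InvalidParamsError{Context: "FakeInput"}
	if v.RepositoryName == nil {
		invalidParams.Add(smithy.NewErrParamRequired("RepositoryName"))
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// addFakeValidationMiddleware wires the validator into a stack, as the
// deleted addOp*ValidationMiddleware helpers do.
func addFakeValidationMiddleware(stack *middleware.Stack) error {
	return stack.Initialize.Add(&validateOpFake{}, middleware.After)
}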
"DeleteRepositoryInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteRepositoryPolicyInput(v *DeleteRepositoryPolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteRepositoryPolicyInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDescribeImagesInput(v *DescribeImagesInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DescribeImagesInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDescribeImageTagsInput(v *DescribeImageTagsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DescribeImageTagsInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetRepositoryCatalogDataInput(v *GetRepositoryCatalogDataInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetRepositoryCatalogDataInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetRepositoryPolicyInput(v *GetRepositoryPolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetRepositoryPolicyInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpInitiateLayerUploadInput(v *InitiateLayerUploadInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InitiateLayerUploadInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListTagsForResourceInput"} - if v.ResourceArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutImageInput(v *PutImageInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutImageInput"} - if v.RepositoryName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) - } - if v.ImageManifest == nil { - invalidParams.Add(smithy.NewErrParamRequired("ImageManifest")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutRepositoryCatalogDataInput(v *PutRepositoryCatalogDataInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutRepositoryCatalogDataInput"} - if v.RepositoryName 
== nil {
-		invalidParams.Add(smithy.NewErrParamRequired("RepositoryName"))
-	}
-	if v.CatalogData == nil {
-		invalidParams.Add(smithy.NewErrParamRequired("CatalogData"))
-	}
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	} else {
-		return nil
-	}
-}
-
-func validateOpSetRepositoryPolicyInput(v *SetRepositoryPolicyInput) error {
-	if v == nil {
-		return nil
-	}
-	invalidParams := smithy.InvalidParamsError{Context: "SetRepositoryPolicyInput"}
-	if v.RepositoryName == nil {
-		invalidParams.Add(smithy.NewErrParamRequired("RepositoryName"))
-	}
-	if v.PolicyText == nil {
-		invalidParams.Add(smithy.NewErrParamRequired("PolicyText"))
-	}
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	} else {
-		return nil
-	}
-}
-
-func validateOpTagResourceInput(v *TagResourceInput) error {
-	if v == nil {
-		return nil
-	}
-	invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"}
-	if v.ResourceArn == nil {
-		invalidParams.Add(smithy.NewErrParamRequired("ResourceArn"))
-	}
-	if v.Tags == nil {
-		invalidParams.Add(smithy.NewErrParamRequired("Tags"))
-	}
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	} else {
-		return nil
-	}
-}
-
-func validateOpUntagResourceInput(v *UntagResourceInput) error {
-	if v == nil {
-		return nil
-	}
-	invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"}
-	if v.ResourceArn == nil {
-		invalidParams.Add(smithy.NewErrParamRequired("ResourceArn"))
-	}
-	if v.TagKeys == nil {
-		invalidParams.Add(smithy.NewErrParamRequired("TagKeys"))
-	}
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	} else {
-		return nil
-	}
-}
-
-func validateOpUploadLayerPartInput(v *UploadLayerPartInput) error {
-	if v == nil {
-		return nil
-	}
-	invalidParams := smithy.InvalidParamsError{Context: "UploadLayerPartInput"}
-	if v.RepositoryName == nil {
-		invalidParams.Add(smithy.NewErrParamRequired("RepositoryName"))
-	}
-	if v.UploadId == nil {
-		invalidParams.Add(smithy.NewErrParamRequired("UploadId"))
-	}
-	if v.PartFirstByte == nil {
-		invalidParams.Add(smithy.NewErrParamRequired("PartFirstByte"))
-	}
-	if v.PartLastByte == nil {
-		invalidParams.Add(smithy.NewErrParamRequired("PartLastByte"))
-	}
-	if v.LayerPartBlob == nil {
-		invalidParams.Add(smithy.NewErrParamRequired("LayerPartBlob"))
-	}
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	} else {
-		return nil
-	}
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
deleted file mode 100644
index cac6f926eb..0000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
+++ /dev/null
@@ -1,132 +0,0 @@
-# v1.11.1 (2024-02-21)
-
-* No change notes available for this release.
-
-# v1.11.0 (2024-02-13)
-
-* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
-
-# v1.10.4 (2023-12-07)
-
-* No change notes available for this release.
-
-# v1.10.3 (2023-11-30)
-
-* No change notes available for this release.
-
-# v1.10.2 (2023-11-29)
-
-* No change notes available for this release.
-
-# v1.10.1 (2023-11-15)
-
-* No change notes available for this release.
-
-# v1.10.0 (2023-10-31)
-
-* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
-
-# v1.9.15 (2023-10-06)
-
-* No change notes available for this release.
- -# v1.9.14 (2023-08-18) - -* No change notes available for this release. - -# v1.9.13 (2023-08-07) - -* No change notes available for this release. - -# v1.9.12 (2023-07-31) - -* No change notes available for this release. - -# v1.9.11 (2022-12-02) - -* No change notes available for this release. - -# v1.9.10 (2022-10-24) - -* No change notes available for this release. - -# v1.9.9 (2022-09-14) - -* No change notes available for this release. - -# v1.9.8 (2022-09-02) - -* No change notes available for this release. - -# v1.9.7 (2022-08-31) - -* No change notes available for this release. - -# v1.9.6 (2022-08-29) - -* No change notes available for this release. - -# v1.9.5 (2022-08-11) - -* No change notes available for this release. - -# v1.9.4 (2022-08-09) - -* No change notes available for this release. - -# v1.9.3 (2022-06-29) - -* No change notes available for this release. - -# v1.9.2 (2022-06-07) - -* No change notes available for this release. - -# v1.9.1 (2022-03-24) - -* No change notes available for this release. - -# v1.9.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.8.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.7.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.6.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.5.0 (2021-11-06) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.4.0 (2021-10-21) - -* **Feature**: Updated to latest version - -# v1.3.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.2.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. - -# v1.2.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version - -# v1.2.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.1.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
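Aside: the validateOp*Input functions deleted from the vendored ecrpublic module at the start of this section all follow the same generated smithy-go pattern: a nil input short-circuits, every missing required field is accumulated into a single smithy.InvalidParamsError, and the aggregate is returned only when non-empty. A minimal standalone sketch of that pattern follows; the input struct is a simplified stand-in for the generated types, and the import uses the canonical github.com/aws/smithy-go path rather than this repo's vendored github.com alias.

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go"
)

// DescribeImagesInput stands in for the generated ecrpublic input structs;
// required fields are pointers so that "unset" is distinct from the zero value.
type DescribeImagesInput struct {
	RepositoryName *string
}

// validateOpDescribeImagesInput mirrors the generated validators removed in
// this diff: collect every missing required field, then return the aggregate
// only if at least one violation was recorded.
func validateOpDescribeImagesInput(v *DescribeImagesInput) error {
	if v == nil {
		return nil
	}
	invalidParams := smithy.InvalidParamsError{Context: "DescribeImagesInput"}
	if v.RepositoryName == nil {
		invalidParams.Add(smithy.NewErrParamRequired("RepositoryName"))
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

func main() {
	// Prints the aggregated validation error naming the missing RepositoryName.
	fmt.Println(validateOpDescribeImagesInput(&DescribeImagesInput{}))
}
```

Accumulating every missing parameter before returning means a caller sees all required-field violations in one error, rather than discovering them one request at a time.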
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go deleted file mode 100644 index 3f451fc9b4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go +++ /dev/null @@ -1,176 +0,0 @@ -package acceptencoding - -import ( - "compress/gzip" - "context" - "fmt" - "io" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const acceptEncodingHeaderKey = "Accept-Encoding" -const contentEncodingHeaderKey = "Content-Encoding" - -// AddAcceptEncodingGzipOptions provides the options for the -// AddAcceptEncodingGzip middleware setup. -type AddAcceptEncodingGzipOptions struct { - Enable bool -} - -// AddAcceptEncodingGzip explicitly adds handling for accept-encoding GZIP -// middleware to the operation stack. This allows checksums to be correctly -// computed without disabling GZIP support. -func AddAcceptEncodingGzip(stack *middleware.Stack, options AddAcceptEncodingGzipOptions) error { - if options.Enable { - if err := stack.Finalize.Add(&EnableGzip{}, middleware.Before); err != nil { - return err - } - if err := stack.Deserialize.Insert(&DecompressGzip{}, "OperationDeserializer", middleware.After); err != nil { - return err - } - return nil - } - - return stack.Finalize.Add(&DisableGzip{}, middleware.Before) -} - -// DisableGzip provides the middleware that will -// disable the underlying http client automatically enabling for gzip -// decompress content-encoding support. -type DisableGzip struct{} - -// ID returns the id for the middleware. -func (*DisableGzip) ID() string { - return "DisableAcceptEncodingGzip" -} - -// HandleFinalize implements the FinalizeMiddleware interface. -func (*DisableGzip) HandleFinalize( - ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - output middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := input.Request.(*smithyhttp.Request) - if !ok { - return output, metadata, &smithy.SerializationError{ - Err: fmt.Errorf("unknown request type %T", input.Request), - } - } - - // Explicitly enable gzip support, this will prevent the http client from - // auto extracting the zipped content. - req.Header.Set(acceptEncodingHeaderKey, "identity") - - return next.HandleFinalize(ctx, input) -} - -// EnableGzip provides a middleware to enable support for -// gzip responses, with manual decompression. This prevents the underlying HTTP -// client from performing the gzip decompression automatically. -type EnableGzip struct{} - -// ID returns the id for the middleware. -func (*EnableGzip) ID() string { - return "AcceptEncodingGzip" -} - -// HandleFinalize implements the FinalizeMiddleware interface. -func (*EnableGzip) HandleFinalize( - ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - output middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := input.Request.(*smithyhttp.Request) - if !ok { - return output, metadata, &smithy.SerializationError{ - Err: fmt.Errorf("unknown request type %T", input.Request), - } - } - - // Explicitly enable gzip support, this will prevent the http client from - // auto extracting the zipped content. 
- req.Header.Set(acceptEncodingHeaderKey, "gzip") - - return next.HandleFinalize(ctx, input) -} - -// DecompressGzip provides the middleware for decompressing a gzip -// response from the service. -type DecompressGzip struct{} - -// ID returns the id for the middleware. -func (*DecompressGzip) ID() string { - return "DecompressGzip" -} - -// HandleDeserialize implements the DeserializeMiddlware interface. -func (*DecompressGzip) HandleDeserialize( - ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - output middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - output, metadata, err = next.HandleDeserialize(ctx, input) - if err != nil { - return output, metadata, err - } - - resp, ok := output.RawResponse.(*smithyhttp.Response) - if !ok { - return output, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("unknown response type %T", output.RawResponse), - } - } - if v := resp.Header.Get(contentEncodingHeaderKey); v != "gzip" { - return output, metadata, err - } - - // Clear content length since it will no longer be valid once the response - // body is decompressed. - resp.Header.Del("Content-Length") - resp.ContentLength = -1 - - resp.Body = wrapGzipReader(resp.Body) - - return output, metadata, err -} - -type gzipReader struct { - reader io.ReadCloser - gzip *gzip.Reader -} - -func wrapGzipReader(reader io.ReadCloser) *gzipReader { - return &gzipReader{ - reader: reader, - } -} - -// Read wraps the gzip reader around the underlying io.Reader to extract the -// response bytes on the fly. -func (g *gzipReader) Read(b []byte) (n int, err error) { - if g.gzip == nil { - g.gzip, err = gzip.NewReader(g.reader) - if err != nil { - g.gzip = nil // ensure uninitialized gzip value isn't used in close. - return 0, fmt.Errorf("failed to decompress gzip response, %w", err) - } - } - - return g.gzip.Read(b) -} - -func (g *gzipReader) Close() error { - if g.gzip == nil { - return nil - } - - if err := g.gzip.Close(); err != nil { - g.reader.Close() - return fmt.Errorf("failed to decompress gzip response, %w", err) - } - - return g.reader.Close() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go deleted file mode 100644 index 7056d9bf6f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Package acceptencoding provides customizations associated with Accept Encoding Header. - -# Accept encoding gzip - -The Go HTTP client automatically supports accept-encoding and content-encoding -gzip by default. This default behavior is not desired by the SDK, and prevents -validating the response body's checksum. To prevent this the SDK must manually -control usage of content-encoding gzip. - -To control content-encoding, the SDK must always set the `Accept-Encoding` -header to a value. This prevents the HTTP client from using gzip automatically. -When gzip is enabled on the API client, the SDK's customization will control -decompressing the gzip data in order to not break the checksum validation. When -gzip is disabled, the API client will disable gzip, preventing the HTTP -client's default behavior. - -An `EnableAcceptEncodingGzip` option may or may not be present depending on the client using -the below middleware. The option if present can be used to enable auto decompressing -gzip by the SDK. 
-*/ -package acceptencoding diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go deleted file mode 100644 index c5ae0f8735..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package acceptencoding - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md deleted file mode 100644 index ead169d5cf..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ /dev/null @@ -1,302 +0,0 @@ -# v1.11.6 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.5 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.4 (2024-03-05) - -* **Bug Fix**: Restore typo'd API `AddAsIsInternalPresigingMiddleware` as an alias for backwards compatibility. - -# v1.11.3 (2024-03-04) - -* **Bug Fix**: Correct a typo in internal AddAsIsPresigningMiddleware API. - -# v1.11.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.37 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.36 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.35 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.34 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.33 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.32 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.31 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.30 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.29 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.28 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.27 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.26 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.25 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.24 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.23 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.22 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.21 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.20 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.19 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.18 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.17 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.16 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.15 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.14 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.1 (2022-03-23) - -* 
**Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-11-06) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
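Aside: the acceptencoding doc.go removed above explains why the SDK pins the Accept-Encoding header. Go's net/http transport only requests gzip (and transparently decompresses the response) when the caller has not set Accept-Encoding itself; setting the header explicitly, as the deleted EnableGzip/DisableGzip finalize middleware does, hands decompression control back to the SDK so response checksums stay verifiable. A small sketch of that standard-library behavior follows; fetchGzip is a hypothetical helper using plain net/http, not the SDK middleware itself.

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
)

// fetchGzip opts in to gzip explicitly. Because the request sets its own
// Accept-Encoding, net/http no longer decompresses transparently: the caller
// sees the raw gzip stream and must wrap the body, which is the behavior the
// deleted acceptencoding middleware builds on.
func fetchGzip(url string) (string, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	// Opting in manually, as the EnableGzip middleware does.
	req.Header.Set("Accept-Encoding", "gzip")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// Mirror the deleted DecompressGzip check: only wrap when the service
	// actually answered with gzip-encoded content.
	var body io.Reader = resp.Body
	if resp.Header.Get("Content-Encoding") == "gzip" {
		gz, err := gzip.NewReader(resp.Body)
		if err != nil {
			return "", fmt.Errorf("failed to decompress gzip response, %w", err)
		}
		defer gz.Close()
		body = gz
	}

	b, err := io.ReadAll(body)
	return string(b), err
}
```

The deleted gzipReader wrapper in accept_encoding_gzip.go does the same wrapping lazily on the first Read, which is also why the middleware clears Content-Length and sets ContentLength to -1: once the body is decompressed on the fly, the original length of the compressed stream no longer applies.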
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go deleted file mode 100644 index 5d5286f92c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go +++ /dev/null @@ -1,56 +0,0 @@ -package presignedurl - -import ( - "context" - - "github.com/aws/smithy-go/middleware" -) - -// WithIsPresigning adds the isPresigning sentinel value to a context to signal -// that the middleware stack is using the presign flow. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func WithIsPresigning(ctx context.Context) context.Context { - return middleware.WithStackValue(ctx, isPresigningKey{}, true) -} - -// GetIsPresigning returns if the context contains the isPresigning sentinel -// value for presigning flows. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetIsPresigning(ctx context.Context) bool { - v, _ := middleware.GetStackValue(ctx, isPresigningKey{}).(bool) - return v -} - -type isPresigningKey struct{} - -// AddAsIsPresigningMiddleware adds a middleware to the head of the stack that -// will update the stack's context to be flagged as being invoked for the -// purpose of presigning. -func AddAsIsPresigningMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before) -} - -// AddAsIsPresigingMiddleware is an alias for backwards compatibility. -// -// Deprecated: This API was released with a typo. Use -// [AddAsIsPresigningMiddleware] instead. -func AddAsIsPresigingMiddleware(stack *middleware.Stack) error { - return AddAsIsPresigningMiddleware(stack) -} - -type asIsPresigningMiddleware struct{} - -func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" } - -func (asIsPresigningMiddleware) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - ctx = WithIsPresigning(ctx) - return next.HandleInitialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go deleted file mode 100644 index 1b85375cf8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package presignedurl provides the customizations for API clients to fill in -// presigned URLs into input parameters. -package presignedurl diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go deleted file mode 100644 index 98bea53bdd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
- -package presignedurl - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go deleted file mode 100644 index 1e2f5c8122..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go +++ /dev/null @@ -1,110 +0,0 @@ -package presignedurl - -import ( - "context" - "fmt" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - - "github.com/aws/smithy-go/middleware" -) - -// URLPresigner provides the interface to presign the input parameters in to a -// presigned URL. -type URLPresigner interface { - // PresignURL presigns a URL. - PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error) -} - -// ParameterAccessor provides an collection of accessor to for retrieving and -// setting the values needed to PresignedURL generation -type ParameterAccessor struct { - // GetPresignedURL accessor points to a function that retrieves a presigned url if present - GetPresignedURL func(interface{}) (string, bool, error) - - // GetSourceRegion accessor points to a function that retrieves source region for presigned url - GetSourceRegion func(interface{}) (string, bool, error) - - // CopyInput accessor points to a function that takes in an input, and returns a copy. - CopyInput func(interface{}) (interface{}, error) - - // SetDestinationRegion accessor points to a function that sets destination region on api input struct - SetDestinationRegion func(interface{}, string) error - - // SetPresignedURL accessor points to a function that sets presigned url on api input struct - SetPresignedURL func(interface{}, string) error -} - -// Options provides the set of options needed by the presigned URL middleware. -type Options struct { - // Accessor are the parameter accessors used by this middleware - Accessor ParameterAccessor - - // Presigner is the URLPresigner used by the middleware - Presigner URLPresigner -} - -// AddMiddleware adds the Presign URL middleware to the middleware stack. -func AddMiddleware(stack *middleware.Stack, opts Options) error { - return stack.Initialize.Add(&presign{options: opts}, middleware.Before) -} - -// RemoveMiddleware removes the Presign URL middleware from the stack. -func RemoveMiddleware(stack *middleware.Stack) error { - _, err := stack.Initialize.Remove((*presign)(nil).ID()) - return err -} - -type presign struct { - options Options -} - -func (m *presign) ID() string { return "Presign" } - -func (m *presign) HandleInitialize( - ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler, -) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - // If PresignedURL is already set ignore middleware. - if _, ok, err := m.options.Accessor.GetPresignedURL(input.Parameters); err != nil { - return out, metadata, fmt.Errorf("presign middleware failed, %w", err) - } else if ok { - return next.HandleInitialize(ctx, input) - } - - // If have source region is not set ignore middleware. 
- srcRegion, ok, err := m.options.Accessor.GetSourceRegion(input.Parameters) - if err != nil { - return out, metadata, fmt.Errorf("presign middleware failed, %w", err) - } else if !ok || len(srcRegion) == 0 { - return next.HandleInitialize(ctx, input) - } - - // Create a copy of the original input so the destination region value can - // be added. This ensures that value does not leak into the original - // request parameters. - paramCpy, err := m.options.Accessor.CopyInput(input.Parameters) - if err != nil { - return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err) - } - - // Destination region is the API client's configured region. - dstRegion := awsmiddleware.GetRegion(ctx) - if err = m.options.Accessor.SetDestinationRegion(paramCpy, dstRegion); err != nil { - return out, metadata, fmt.Errorf("presign middleware failed, %w", err) - } - - presignedReq, err := m.options.Presigner.PresignURL(ctx, srcRegion, paramCpy) - if err != nil { - return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err) - } - - // Update the original input with the presigned URL value. - if err = m.options.Accessor.SetPresignedURL(input.Parameters, presignedReq.URL); err != nil { - return out, metadata, fmt.Errorf("presign middleware failed, %w", err) - } - - return next.HandleInitialize(ctx, input) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md deleted file mode 100644 index 5a5083094b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ /dev/null @@ -1,409 +0,0 @@ -# v1.20.3 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.2 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.19.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.1 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.19.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.7 (2024-01-18) - -* No change notes available for this release. - -# v1.18.6 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.5 (2023-12-08) - -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. - -# v1.18.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. - -# v1.18.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. 
-* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.17.3 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2023-10-02) - -* **Feature**: Fix FIPS Endpoints in aws-us-gov. - -# v1.14.1 (2023-09-22) - -* No change notes available for this release. - -# v1.14.0 (2023-09-18) - -* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. - -# v1.13.6 (2023-08-31) - -* No change notes available for this release. - -# v1.13.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2023-08-01) - -* No change notes available for this release. - -# v1.13.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.14 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2023-06-15) - -* No change notes available for this release. 
- -# v1.12.11 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2023-05-04) - -* No change notes available for this release. - -# v1.12.9 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2023-04-10) - -* No change notes available for this release. - -# v1.12.7 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.5 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. - -# v1.12.3 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2023-02-15) - -* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. -* **Bug Fix**: Correct error type parsing for restJson services. - -# v1.12.1 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - -# v1.11.28 (2022-12-20) - -* No change notes available for this release. - -# v1.11.27 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.26 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.25 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.24 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.23 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.22 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.21 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.20 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.19 (2022-08-30) - -* **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference. - -# v1.11.18 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.17 (2022-08-15) - -* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) - -# v1.11.16 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.15 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.14 (2022-08-08) - -* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.13 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.12 (2022-07-11) - -* No change notes available for this release. - -# v1.11.11 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.10 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.9 (2022-06-16) - -* No change notes available for this release. 
- -# v1.11.8 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.7 (2022-05-26) - -* No change notes available for this release. - -# v1.11.6 (2022-05-25) - -* No change notes available for this release. - -# v1.11.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adds a new retry mode, `Adaptive`. `Adaptive` retry mode is an experimental mode that adds client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Documentation**: Updated API models -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-12-21) - -* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. - -# v1.6.2 (2021-12-02) - -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Feature**: Updated service to latest API model.
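The v1.10.0 entry above introduces `RetryMode` and `RetryMaxAttempts` on client `Options`. A minimal opt-in sketch, not part of this diff, showing the programmatic equivalents of the shared-config/environment settings the entry mentions:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	// Equivalent to AWS_RETRY_MODE=adaptive and AWS_MAX_ATTEMPTS=5 in the
	// environment, or retry_mode/max_attempts in the shared config file.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRetryMode(aws.RetryModeAdaptive),
		config.WithRetryMaxAttempts(5),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = sso.NewFromConfig(cfg)
}
```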
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.2 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go deleted file mode 100644 index fff457735b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ /dev/null @@ -1,537 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package sso - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - smithy "github.com/aws/smithy-go" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "time" -) - -const ServiceID = "SSO" -const ServiceAPIVersion = "2019-06-10" - -// Client provides the API client to make operations call for AWS Single Sign-On. -type Client struct { - options Options -} - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveEndpointResolverV2(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - wrapWithAnonymousAuth(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. Config overrides should instead be made on a per-operation basis through -// functional options. 
-func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { - ctx = middleware.ClearStackValues(ctx) - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) - if err != nil { - err = &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - return result, metadata, err -} - -type operationInputKey struct{} - -func setOperationInput(ctx context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" -} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in 
middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - -func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, optFns...) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) - - default: - o.Retryer = retry.NewStandard(standardOptions...) 
- } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - 
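The `finalizeRetryMaxAttempts` and `finalizeOperationRetryMaxAttempts` helpers above are what make a `RetryMaxAttempts` override take effect at construction time and per call. A caller-side sketch, not part of this diff; the token is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	client := sso.New(sso.Options{Region: "us-east-1", RetryMaxAttempts: 4})
	// Per-operation override: the retryer is rewrapped for this call only,
	// and only because the value differs from the client default.
	_, err := client.ListAccounts(context.TODO(), &sso.ListAccountsInput{
		AccessToken: aws.String("placeholder-token"),
	}, func(o *sso.Options) {
		o.RetryMaxAttempts = 1
	})
	fmt.Println(err)
}
```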
-func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) - -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) - -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go deleted file mode 100644 index 4b21e8b00a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ /dev/null @@ -1,146 +0,0 @@ -// Code generated by 
smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sso/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the STS short-term credentials for a given role name that is assigned -// to the user. -func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) { - if params == nil { - params = &GetRoleCredentialsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetRoleCredentials", params, optFns, c.addOperationGetRoleCredentialsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetRoleCredentialsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetRoleCredentialsInput struct { - - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. - // - // This member is required. - AccessToken *string - - // The identifier for the AWS account that is assigned to the user. - // - // This member is required. - AccountId *string - - // The friendly name of the role that is assigned to the user. - // - // This member is required. - RoleName *string - - noSmithyDocumentSerde -} - -type GetRoleCredentialsOutput struct { - - // The credentials for the role that is assigned to the user. - RoleCredentials *types.RoleCredentials - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetRoleCredentials{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetRoleCredentials"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { - return err - } - if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetRoleCredentials(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetRoleCredentials", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go deleted file mode 100644 index e44da697c5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ /dev/null @@ -1,241 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sso/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Lists all roles that are assigned to the user for a given AWS account. -func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesInput, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { - if params == nil { - params = &ListAccountRolesInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListAccountRoles", params, optFns, c.addOperationListAccountRolesMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListAccountRolesOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListAccountRolesInput struct { - - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. - // - // This member is required. - AccessToken *string - - // The identifier for the AWS account that is assigned to the user. - // - // This member is required. - AccountId *string - - // The number of items that clients can request per page. - MaxResults *int32 - - // The page token from the previous response output when you request subsequent - // pages. - NextToken *string - - noSmithyDocumentSerde -} - -type ListAccountRolesOutput struct { - - // The page token client that is used to retrieve the list of accounts. - NextToken *string - - // A paginated response with the list of roles and the next token if more results - // are available. - RoleList []types.RoleInfo - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccountRoles{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccountRoles"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -// ListAccountRolesAPIClient is a client that implements the ListAccountRoles -// operation. -type ListAccountRolesAPIClient interface { - ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) -} - -var _ ListAccountRolesAPIClient = (*Client)(nil) - -// ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles -type ListAccountRolesPaginatorOptions struct { - // The number of items that clients can request per page. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. 
- StopOnDuplicateToken bool -} - -// ListAccountRolesPaginator is a paginator for ListAccountRoles -type ListAccountRolesPaginator struct { - options ListAccountRolesPaginatorOptions - client ListAccountRolesAPIClient - params *ListAccountRolesInput - nextToken *string - firstPage bool -} - -// NewListAccountRolesPaginator returns a new ListAccountRolesPaginator -func NewListAccountRolesPaginator(client ListAccountRolesAPIClient, params *ListAccountRolesInput, optFns ...func(*ListAccountRolesPaginatorOptions)) *ListAccountRolesPaginator { - if params == nil { - params = &ListAccountRolesInput{} - } - - options := ListAccountRolesPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListAccountRolesPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListAccountRolesPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListAccountRoles page. -func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - result, err := p.client.ListAccountRoles(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListAccountRoles", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go deleted file mode 100644 index 2d7add067f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ /dev/null @@ -1,238 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sso/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by -// the administrator of the account. For more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) -// in the IAM Identity Center User Guide. This operation returns a paginated -// response. 
-func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { - if params == nil { - params = &ListAccountsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListAccounts", params, optFns, c.addOperationListAccountsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListAccountsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListAccountsInput struct { - - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. - // - // This member is required. - AccessToken *string - - // This is the number of items clients can request per page. - MaxResults *int32 - - // (Optional) When requesting subsequent pages, this is the page token from the - // previous response output. - NextToken *string - - noSmithyDocumentSerde -} - -type ListAccountsOutput struct { - - // A paginated response with the list of account information and the next token if - // more results are available. - AccountList []types.AccountInfo - - // The page token client that is used to retrieve the list of accounts. - NextToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccounts{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccounts"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpListAccountsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil 
{ - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -// ListAccountsAPIClient is a client that implements the ListAccounts operation. -type ListAccountsAPIClient interface { - ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) -} - -var _ ListAccountsAPIClient = (*Client)(nil) - -// ListAccountsPaginatorOptions is the paginator options for ListAccounts -type ListAccountsPaginatorOptions struct { - // This is the number of items clients can request per page. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListAccountsPaginator is a paginator for ListAccounts -type ListAccountsPaginator struct { - options ListAccountsPaginatorOptions - client ListAccountsAPIClient - params *ListAccountsInput - nextToken *string - firstPage bool -} - -// NewListAccountsPaginator returns a new ListAccountsPaginator -func NewListAccountsPaginator(client ListAccountsAPIClient, params *ListAccountsInput, optFns ...func(*ListAccountsPaginatorOptions)) *ListAccountsPaginator { - if params == nil { - params = &ListAccountsInput{} - } - - options := ListAccountsPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListAccountsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListAccountsPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListAccounts page. -func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - result, err := p.client.ListAccounts(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListAccounts", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go deleted file mode 100644 index 3ee682d19e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ /dev/null @@ -1,141 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
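The ListAccounts paginator deleted above follows the SDK's standard `HasMorePages`/`NextPage` loop, stopping on a nil or empty token (or on a duplicate token when `StopOnDuplicateToken` is set). A usage sketch, not part of this diff; the token and region are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	client := sso.New(sso.Options{Region: "us-east-1"})
	p := sso.NewListAccountsPaginator(client, &sso.ListAccountsInput{
		AccessToken: aws.String("placeholder-token"),
		MaxResults:  aws.Int32(10),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, acct := range page.AccountList {
			fmt.Println(aws.ToString(acct.AccountId), aws.ToString(acct.AccountName))
		}
	}
}
```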
- -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Removes the locally stored SSO tokens from the client-side cache and sends an -// API call to the IAM Identity Center service to invalidate the corresponding -// server-side IAM Identity Center sign in session. If a user uses IAM Identity -// Center to access the AWS CLI, the user’s IAM Identity Center sign in session is -// used to obtain an IAM session, as specified in the corresponding IAM Identity -// Center permission set. More specifically, IAM Identity Center assumes an IAM -// role in the target account on behalf of the user, and the corresponding -// temporary AWS credentials are returned to the client. After user logout, any -// existing IAM role sessions that were created by using IAM Identity Center -// permission sets continue based on the duration configured in the permission set. -// For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) -// in the IAM Identity Center User Guide. -func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { - if params == nil { - params = &LogoutInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "Logout", params, optFns, c.addOperationLogoutMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*LogoutOutput) - out.ResultMetadata = metadata - return out, nil -} - -type LogoutInput struct { - - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. - // - // This member is required. - AccessToken *string - - noSmithyDocumentSerde -} - -type LogoutOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpLogout{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "Logout"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpLogoutValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opLogout(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "Logout", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go deleted file mode 100644 index 3b28e825dd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go +++ /dev/null @@ -1,308 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
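The deleted auth.go below maps every SSO portal operation to the anonymous auth scheme (see `operationAuthOptions`): these APIs authenticate with the bearer `AccessToken` carried in the request rather than with SigV4. A caller-side sketch of `GetRoleCredentials` under that model, not part of this diff; all identifiers are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	// No AWS credentials are configured: the operation resolves to the
	// anonymous scheme and authenticates via AccessToken alone.
	client := sso.New(sso.Options{Region: "us-east-1"})
	out, err := client.GetRoleCredentials(context.TODO(), &sso.GetRoleCredentialsInput{
		AccessToken: aws.String("placeholder-token"), // from the OIDC CreateToken flow
		AccountId:   aws.String("111122223333"),
		RoleName:    aws.String("ExampleRole"),
	})
	if err != nil {
		log.Fatal(err)
	}
	rc := out.RoleCredentials
	fmt.Println(aws.ToString(rc.AccessKeyId), rc.Expiration)
}
```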
- -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The region in which the operation is being invoked. - Region string -} - -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthParamsRegion(params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. 
-type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ - "GetRoleCredentials": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "ListAccountRoles": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "ListAccounts": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "Logout": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, -} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "awsssoportal") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - for _, option := range options { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { - return &resolvedAuthScheme{ - Scheme: scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, scheme 
*resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go deleted file mode 100644 index 8bba205f43..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go +++ /dev/null @@ -1,1151 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
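The deserializers that follow all share one decode pattern: the response body is teed through a fixed-size smithy-go ring buffer so that, when decoding fails, the most recent bytes can be attached to the DeserializationError as a snapshot. A self-contained sketch of that pattern; the helper name and sample payload are invented for illustration.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"strings"

	smithyio "github.com/aws/smithy-go/io"
)

// decodeWithSnapshot mirrors the generated code: tee the body into a ring
// buffer, decode with UseNumber, and report the buffered tail on failure.
func decodeWithSnapshot(body io.Reader, out interface{}) error {
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	decoder := json.NewDecoder(io.TeeReader(body, ringBuffer))
	decoder.UseNumber() // preserve numeric precision, as the SDK does

	if err := decoder.Decode(out); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		return fmt.Errorf("failed to decode response body, %w (snapshot: %q)", err, snapshot.Bytes())
	}
	return nil
}

func main() {
	var shape interface{}
	if err := decodeWithSnapshot(strings.NewReader(`{"nextToken":"abc"}`), &shape); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(shape) // map[nextToken:abc]
}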
- -package sso - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" - "github.com/aws/aws-sdk-go-v2/service/sso/types" - smithy "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "io/ioutil" - "strings" -) - -type awsRestjson1_deserializeOpGetRoleCredentials struct { -} - -func (*awsRestjson1_deserializeOpGetRoleCredentials) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetRoleCredentials(response, &metadata) - } - output := &GetRoleCredentialsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case 
strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - - case strings.EqualFold("UnauthorizedException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(v **GetRoleCredentialsOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetRoleCredentialsOutput - if *v == nil { - sv = &GetRoleCredentialsOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "roleCredentials": - if err := awsRestjson1_deserializeDocumentRoleCredentials(&sv.RoleCredentials, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpListAccountRoles struct { -} - -func (*awsRestjson1_deserializeOpListAccountRoles) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListAccountRoles(response, &metadata) - } - output := &ListAccountRolesOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentListAccountRolesOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error 
response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - - case strings.EqualFold("UnauthorizedException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentListAccountRolesOutput(v **ListAccountRolesOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *ListAccountRolesOutput - if *v == nil { - sv = &ListAccountRolesOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - case "roleList": - if err := awsRestjson1_deserializeDocumentRoleListType(&sv.RoleList, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpListAccounts struct { -} - -func (*awsRestjson1_deserializeOpListAccounts) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListAccounts(response, &metadata) - } - output := &ListAccountsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer 
:= smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentListAccountsOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - - case strings.EqualFold("UnauthorizedException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentListAccountsOutput(v **ListAccountsOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *ListAccountsOutput - if *v == nil { - sv = &ListAccountsOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accountList": - if err := awsRestjson1_deserializeDocumentAccountListType(&sv.AccountList, value); err != nil { - return err - } - - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected 
NextTokenType to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpLogout struct { -} - -func (*awsRestjson1_deserializeOpLogout) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorLogout(response, &metadata) - } - output := &LogoutOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - - case strings.EqualFold("UnauthorizedException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRequestException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot 
bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ResourceNotFoundException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.TooManyRequestsException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentTooManyRequestsException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorUnauthorizedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.UnauthorizedException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentUnauthorizedException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } 
- return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeDocumentAccountInfo(v **types.AccountInfo, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AccountInfo - if *v == nil { - sv = &types.AccountInfo{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accountId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) - } - sv.AccountId = ptr.String(jtv) - } - - case "accountName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccountNameType to be of type string, got %T instead", value) - } - sv.AccountName = ptr.String(jtv) - } - - case "emailAddress": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected EmailAddressType to be of type string, got %T instead", value) - } - sv.EmailAddress = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentAccountListType(v *[]types.AccountInfo, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.AccountInfo - if *v == nil { - cv = []types.AccountInfo{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.AccountInfo - destAddr := &col - if err := awsRestjson1_deserializeDocumentAccountInfo(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidRequestException - if *v == nil { - sv = &types.InvalidRequestException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ResourceNotFoundException - if *v == nil { - sv = &types.ResourceNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = 
sv - return nil -} - -func awsRestjson1_deserializeDocumentRoleCredentials(v **types.RoleCredentials, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RoleCredentials - if *v == nil { - sv = &types.RoleCredentials{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accessKeyId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccessKeyType to be of type string, got %T instead", value) - } - sv.AccessKeyId = ptr.String(jtv) - } - - case "expiration": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ExpirationTimestampType to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.Expiration = i64 - } - - case "secretAccessKey": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected SecretAccessKeyType to be of type string, got %T instead", value) - } - sv.SecretAccessKey = ptr.String(jtv) - } - - case "sessionToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected SessionTokenType to be of type string, got %T instead", value) - } - sv.SessionToken = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentRoleInfo(v **types.RoleInfo, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RoleInfo - if *v == nil { - sv = &types.RoleInfo{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accountId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) - } - sv.AccountId = ptr.String(jtv) - } - - case "roleName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RoleNameType to be of type string, got %T instead", value) - } - sv.RoleName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentRoleListType(v *[]types.RoleInfo, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.RoleInfo - if *v == nil { - cv = []types.RoleInfo{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.RoleInfo - destAddr := &col - if err := awsRestjson1_deserializeDocumentRoleInfo(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.TooManyRequestsException - if *v == 
nil { - sv = &types.TooManyRequestsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.UnauthorizedException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnauthorizedException - if *v == nil { - sv = &types.UnauthorizedException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go deleted file mode 100644 index 59456d5dc2..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -// Package sso provides the API client, operations, and parameter types for AWS -// Single Sign-On. -// -// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web -// service that makes it easy for you to assign user access to IAM Identity Center -// resources such as the AWS access portal. Users can get AWS account applications -// and roles assigned to them and get federated into the application. Although AWS -// Single Sign-On was renamed, the sso and identitystore API namespaces will -// continue to retain their original name for backward compatibility purposes. For -// more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) -// . This reference guide describes the IAM Identity Center Portal operations that -// you can call programmatically and includes detailed information on data types and -// errors. AWS provides SDKs that consist of libraries and sample code for various -// programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. -// The SDKs provide a convenient way to create programmatic access to IAM Identity -// Center and other AWS services. For more information about the AWS SDKs, -// including how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/) -// . -package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go deleted file mode 100644 index 76521eec0e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go +++ /dev/null @@ -1,535 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT.
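The doc.go removed above describes the portal operations (GetRoleCredentials, ListAccounts, ListAccountRoles, Logout). A hedged sketch of the usual caller-side conversion from the portal's RoleCredentials into an aws.Credentials value; account, role, and token are placeholders, and the expiration handling relies on the deserializer above storing the "expiration" JSON number as epoch milliseconds in the int64 Expiration field.

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

// fetchCredentials converts a portal response into an aws.Credentials.
func fetchCredentials(ctx context.Context, client *sso.Client, token string) (aws.Credentials, error) {
	out, err := client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
		AccessToken: aws.String(token), // placeholder inputs
		AccountId:   aws.String("123456789012"),
		RoleName:    aws.String("ReadOnlyAccess"),
	})
	if err != nil {
		return aws.Credentials{}, err
	}
	rc := out.RoleCredentials
	return aws.Credentials{
		AccessKeyID:     aws.ToString(rc.AccessKeyId),
		SecretAccessKey: aws.ToString(rc.SecretAccessKey),
		SessionToken:    aws.ToString(rc.SessionToken),
		CanExpire:       true,
		// Expiration is decoded above as epoch milliseconds.
		Expires: time.Unix(0, rc.Expiration*int64(time.Millisecond)).UTC(),
	}, nil
}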
- -package sso - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom. You can provide functional options to configure endpoint -// values for the resolved endpoint.
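A usage sketch for EndpointResolverFromURL as just described; the URL is a placeholder. This is the deprecated v1 hook, and newer code would set o.BaseEndpoint instead (see resolveBaseEndpoint further down).

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

// newPinnedClient routes every request to one URL via the legacy resolver.
func newPinnedClient(cfg aws.Config) *sso.Client {
	return sso.NewFromConfig(cfg, func(o *sso.Options) {
		o.EndpointResolver = sso.EndpointResolverFromURL("https://portal.sso.us-west-2.amazonaws.com")
	})
}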
-func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "awsssoportal" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
-// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. - // - // AWS::UseDualStack - UseDualStack *bool - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string -} - -// ValidateRequired validates required parameters are set. 
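The EndpointParameters defined above can also be fed straight into the rules-based resolver (NewDefaultEndpointResolverV2, defined next). A sketch; the region and flag values are arbitrary.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func resolveFIPSEndpoint() error {
	params := sso.EndpointParameters{
		Region:       aws.String("us-east-1"),
		UseFIPS:      aws.Bool(true),
		UseDualStack: aws.Bool(false),
	}
	ep, err := sso.NewDefaultEndpointResolverV2().ResolveEndpoint(context.Background(), params)
	if err != nil {
		return err
	}
	// Per the rules below: https://portal.sso-fips.us-east-1.amazonaws.com
	fmt.Println(ep.URI.String())
	return nil
}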
-func (p EndpointParameters) ValidateRequired() error { - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameters with default values -// applied to members where applicable. -func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - return p -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseDualStack := *params.UseDualStack - _UseFIPS := *params.UseFIPS - - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") - } - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") - } - uriString := _Endpoint - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == true { - if _UseDualStack == true { - if true == _PartitionResult.SupportsFIPS { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") - } - } - if _UseFIPS == true { - if true == _PartitionResult.SupportsFIPS { - if "aws-us-gov" == _PartitionResult.Name { - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") -
return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") - } - if _UseDualStack == true { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json deleted file mode 100644 index 936253d7ca..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_GetRoleCredentials.go", - "api_op_ListAccountRoles.go", - "api_op_ListAccounts.go", - "api_op_Logout.go", - "auth.go", - "deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - "protocol_test.go", - "serializers.go", - 
"snapshot_test.go", - "types/errors.go", - "types/types.go", - "validators.go" - ], - "go": "1.15", - "module": "github.com/aws/aws-sdk-go-v2/service/sso", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go deleted file mode 100644 index e98c0f328e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package sso - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.20.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go deleted file mode 100644 index 0a00b256e1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ /dev/null @@ -1,542 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package endpoints - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. - UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
- UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver SSO endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "portal.sso.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "portal.sso-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "af-south-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.af-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "af-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-east-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-northeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - }: 
endpoints.Endpoint{ - Hostname: "portal.sso.ap-northeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-northeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-south-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-4.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-4", - }, - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ca-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-central-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-north-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-north-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-south-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-3", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-west-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-3", - }, - }, - endpoints.EndpointKey{ - Region: "il-central-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.il-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "il-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-central-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.me-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-central-1", - }, - }, - endpoints.EndpointKey{ - Region: 
"me-south-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.me-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "sa-east-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.sa-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "sa-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "portal.sso.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "portal.sso-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "cn-north-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - }, - { - ID: "aws-iso-e", - Defaults: 
map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - }, - { - ID: "aws-us-gov", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "portal.sso.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "portal.sso-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go deleted file mode 100644 index 5dee7e53f4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go +++ /dev/null @@ -1,217 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. 
- AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // client's initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number of attempts an API client makes - // when calling an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client's default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating new API clients, this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, but may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retryer created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, but may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with.
Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. - AuthSchemes []smithyhttp.AuthScheme -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. 
-func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go deleted file mode 100644 index 02e3141156..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go +++ /dev/null @@ -1,284 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -type awsRestjson1_serializeOpGetRoleCredentials struct { -} - -func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetRoleCredentialsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/federation/credentials") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AccessToken != nil && len(*v.AccessToken) > 0 { - locationName := 
"X-Amz-Sso_bearer_token" - encoder.SetHeader(locationName).String(*v.AccessToken) - } - - if v.AccountId != nil { - encoder.SetQuery("account_id").String(*v.AccountId) - } - - if v.RoleName != nil { - encoder.SetQuery("role_name").String(*v.RoleName) - } - - return nil -} - -type awsRestjson1_serializeOpListAccountRoles struct { -} - -func (*awsRestjson1_serializeOpListAccountRoles) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListAccountRolesInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/assignment/roles") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AccessToken != nil && len(*v.AccessToken) > 0 { - locationName := "X-Amz-Sso_bearer_token" - encoder.SetHeader(locationName).String(*v.AccessToken) - } - - if v.AccountId != nil { - encoder.SetQuery("account_id").String(*v.AccountId) - } - - if v.MaxResults != nil { - encoder.SetQuery("max_result").Integer(*v.MaxResults) - } - - if v.NextToken != nil { - encoder.SetQuery("next_token").String(*v.NextToken) - } - - return nil -} - -type awsRestjson1_serializeOpListAccounts struct { -} - -func (*awsRestjson1_serializeOpListAccounts) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListAccountsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - 
- opPath, opQuery := httpbinding.SplitURI("/assignment/accounts") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestjson1_serializeOpHttpBindingsListAccountsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AccessToken != nil && len(*v.AccessToken) > 0 { - locationName := "X-Amz-Sso_bearer_token" - encoder.SetHeader(locationName).String(*v.AccessToken) - } - - if v.MaxResults != nil { - encoder.SetQuery("max_result").Integer(*v.MaxResults) - } - - if v.NextToken != nil { - encoder.SetQuery("next_token").String(*v.NextToken) - } - - return nil -} - -type awsRestjson1_serializeOpLogout struct { -} - -func (*awsRestjson1_serializeOpLogout) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*LogoutInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/logout") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestjson1_serializeOpHttpBindingsLogoutInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder 
*httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AccessToken != nil && len(*v.AccessToken) > 0 { - locationName := "X-Amz-Sso_bearer_token" - encoder.SetHeader(locationName).String(*v.AccessToken) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go deleted file mode 100644 index e97a126e8b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go +++ /dev/null @@ -1,115 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// Indicates that a problem occurred with the input to the request. For example, a -// required parameter might be missing or out of range. -type InvalidRequestException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidRequestException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRequestException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRequestException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRequestException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified resource doesn't exist. -type ResourceNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ResourceNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ResourceNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ResourceNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ResourceNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the request is being made too frequently and is more than what -// the server can handle. -type TooManyRequestsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *TooManyRequestsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *TooManyRequestsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *TooManyRequestsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "TooManyRequestsException" - } - return *e.ErrorCodeOverride -} -func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. 
-type UnauthorizedException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UnauthorizedException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnauthorizedException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnauthorizedException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnauthorizedException" - } - return *e.ErrorCodeOverride -} -func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go deleted file mode 100644 index 8dc02296b1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go +++ /dev/null @@ -1,61 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" -) - -// Provides information about your AWS account. -type AccountInfo struct { - - // The identifier of the AWS account that is assigned to the user. - AccountId *string - - // The display name of the AWS account that is assigned to the user. - AccountName *string - - // The email address of the AWS account that is assigned to the user. - EmailAddress *string - - noSmithyDocumentSerde -} - -// Provides information about the role credentials that are assigned to the user. -type RoleCredentials struct { - - // The identifier used for the temporary security credentials. For more - // information, see Using Temporary Security Credentials to Request Access to AWS - // Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. - AccessKeyId *string - - // The date on which temporary security credentials expire. - Expiration int64 - - // The key that is used to sign the request. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. - SecretAccessKey *string - - // The token used for temporary credentials. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. - SessionToken *string - - noSmithyDocumentSerde -} - -// Provides information about the role that is assigned to the user. -type RoleInfo struct { - - // The identifier of the AWS account assigned to the user. - AccountId *string - - // The friendly name of the role that is assigned to the user. - RoleName *string - - noSmithyDocumentSerde -} - -type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go deleted file mode 100644 index f6bf461f74..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go +++ /dev/null @@ -1,175 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package sso - -import ( - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpGetRoleCredentials struct { -} - -func (*validateOpGetRoleCredentials) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetRoleCredentials) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetRoleCredentialsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetRoleCredentialsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListAccountRoles struct { -} - -func (*validateOpListAccountRoles) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListAccountRoles) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListAccountRolesInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListAccountRolesInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListAccounts struct { -} - -func (*validateOpListAccounts) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListAccounts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListAccountsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListAccountsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpLogout struct { -} - -func (*validateOpLogout) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpLogout) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*LogoutInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpLogoutInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func addOpGetRoleCredentialsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetRoleCredentials{}, middleware.After) -} - -func addOpListAccountRolesValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListAccountRoles{}, middleware.After) -} - -func addOpListAccountsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListAccounts{}, middleware.After) -} - -func addOpLogoutValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpLogout{}, middleware.After) -} - -func validateOpGetRoleCredentialsInput(v *GetRoleCredentialsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetRoleCredentialsInput"} - if 
v.RoleName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleName")) - } - if v.AccountId == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccountId")) - } - if v.AccessToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListAccountRolesInput(v *ListAccountRolesInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListAccountRolesInput"} - if v.AccessToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) - } - if v.AccountId == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccountId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListAccountsInput(v *ListAccountsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListAccountsInput"} - if v.AccessToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpLogoutInput(v *LogoutInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LogoutInput"} - if v.AccessToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md deleted file mode 100644 index c6d5ae92e3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ /dev/null @@ -1,403 +0,0 @@ -# v1.23.3 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.2 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.22.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.1 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.22.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.7 (2024-01-16) - -* No change notes available for this release. - -# v1.21.6 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.5 (2023-12-08) - -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. 
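
The v1.22.1 and v1.21.5 notes above both concern how client options flow into a request: an empty Region now fails fast with the explicit "Missing Region" endpoint error seen earlier in this diff, and retry overrides passed as functional options are respected without discarding the default Retryer. As a minimal sketch of that pattern — using only the fields visible in the deleted options.go above; the endpoint URL, token, and attempt count are placeholders, not values from this change:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func listAccounts(ctx context.Context) error {
	// Region must be set explicitly; per the v1.22.1 note, leaving it empty
	// surfaces a clear "Missing Region" error instead of an obscure hostname
	// lookup failure.
	client := sso.New(sso.Options{
		Region: "us-east-1",
		// BaseEndpoint is the supported replacement for the deprecated
		// EndpointResolver (see the deleted options.go above). Placeholder URL.
		BaseEndpoint: aws.String("https://portal.sso.us-east-1.amazonaws.com"),
	})

	// Per-operation functional options: the v1.21.5 fix keeps the default
	// Retryer while still honoring a max-attempts override set this way.
	_, err := client.ListAccounts(ctx,
		&sso.ListAccountsInput{AccessToken: aws.String("<access-token>")},
		func(o *sso.Options) { o.RetryMaxAttempts = 5 },
	)
	return err
}
```
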
- -# v1.21.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. - -# v1.21.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. -* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.3 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.2 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.20.1 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2023-11-17) - -* **Feature**: Adding support for `sso-oauth:CreateTokenWithIAM`. - -# v1.19.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2023-09-22) - -* No change notes available for this release. - -# v1.17.0 (2023-09-20) - -* **Feature**: Update FIPS endpoints in aws-us-gov. - -# v1.16.0 (2023-09-18) - -* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. - -# v1.15.6 (2023-09-05) - -* No change notes available for this release. - -# v1.15.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-08-01) - -* No change notes available for this release. - -# v1.15.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. 
For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.14 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.13 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.12 (2023-06-15) - -* No change notes available for this release. - -# v1.14.11 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.10 (2023-05-04) - -* No change notes available for this release. - -# v1.14.9 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.8 (2023-04-10) - -* No change notes available for this release. - -# v1.14.7 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.6 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.5 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.4 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. - -# v1.14.3 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.2 (2023-02-15) - -* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. -* **Bug Fix**: Correct error type parsing for restJson services. - -# v1.14.1 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - -# v1.13.11 (2022-12-19) - -* No change notes available for this release. - -# v1.13.10 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2022-09-30) - -* **Documentation**: Documentation updates for the IAM Identity Center OIDC CLI Reference. - -# v1.13.5 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-08-25) - -* **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action. 
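
The v1.14.0 entry above introduced the `ErrorCodeOverride` field that appears on every error struct in the deleted types/errors.go earlier in this diff. As a hedged sketch of how callers conventionally branch on those typed errors — plain `errors.As` usage; the `classify` helper is illustrative, not part of this change:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/sso/types"
)

// classify maps an SSO API error to a short handling hint using the typed
// errors defined in the deleted types/errors.go above.
func classify(err error) string {
	var unauthorized *types.UnauthorizedException
	var throttled *types.TooManyRequestsException
	switch {
	case errors.As(err, &unauthorized):
		// ErrorCode() returns ErrorCodeOverride when the service supplied one,
		// falling back to the modeled code otherwise.
		return fmt.Sprintf("re-authenticate: %s", unauthorized.ErrorCode())
	case errors.As(err, &throttled):
		return "back off and retry"
	default:
		return "unhandled"
	}
}
```
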
- -# v1.12.14 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2022-08-08) - -* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.11 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2022-07-11) - -* No change notes available for this release. - -# v1.12.9 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.7 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2022-05-27) - -* No change notes available for this release. - -# v1.12.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-01-07) - -* **Feature**: API client updated -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.2 (2021-12-02) - -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-10-11) - -* **Feature**: API client updated -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-09-17) - -* **Feature**: Updated API client and endpoints to latest revision. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-08-27) - -* **Feature**: Updated API model to latest revision. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go deleted file mode 100644 index 8dc643bb0c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ /dev/null @@ -1,537 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ssooidc - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - smithy "github.com/aws/smithy-go" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "time" -) - -const ServiceID = "SSO OIDC" -const ServiceAPIVersion = "2019-06-10" - -// Client provides the API client to make operations call for AWS SSO OIDC. -type Client struct { - options Options -} - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveEndpointResolverV2(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - wrapWithAnonymousAuth(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. Config overrides should instead be made on a per-operation basis through -// functional options. 
-func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { - ctx = middleware.ClearStackValues(ctx) - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) - if err != nil { - err = &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - return result, metadata, err -} - -type operationInputKey struct{} - -func setOperationInput(ctx context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" -} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in 
middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - -func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, optFns...) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) - - default: - o.Retryer = retry.NewStandard(standardOptions...) 
- } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) 
-} - -func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) - -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) - -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go deleted file mode 100644 index 63f1eeb131..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ /dev/null @@ -1,203 +0,0 @@ -// Code generated by 
smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates and returns access and refresh tokens for clients that are -// authenticated using client secrets. The access token can be used to fetch -// short-term credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. -func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) { - if params == nil { - params = &CreateTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateToken", params, optFns, c.addOperationCreateTokenMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateTokenInput struct { - - // The unique identifier string for the client or application. This value comes - // from the result of the RegisterClient API. - // - // This member is required. - ClientId *string - - // A secret string generated for the client. This value should come from the - // persisted result of the RegisterClient API. - // - // This member is required. - ClientSecret *string - - // Supports the following OAuth grant types: Device Code and Refresh Token. - // Specify either of the following values, depending on the grant type that you - // want: * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh - // Token - refresh_token For information about how to obtain the device code, see - // the StartDeviceAuthorization topic. - // - // This member is required. - GrantType *string - - // Used only when calling this API for the Authorization Code grant type. The - // short-term code is used to identify this authorization request. This grant type - // is currently unsupported for the CreateToken API. - Code *string - - // Used only when calling this API for the Device Code grant type. This short-term - // code is used to identify this authorization request. This comes from the result - // of the StartDeviceAuthorization API. - DeviceCode *string - - // Used only when calling this API for the Authorization Code grant type. This - // value specifies the location of the client or application that has registered to - // receive the authorization code. - RedirectUri *string - - // Used only when calling this API for the Refresh Token grant type. This token is - // used to refresh short-term tokens, such as the access token, that might expire. - // For more information about the features and limitations of the current IAM - // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . - RefreshToken *string - - // The list of scopes for which authorization is requested. The access token that - // is issued is limited to the scopes that are granted. If this value is not - // specified, IAM Identity Center authorizes all scopes that are configured for the - // client during the call to RegisterClient . - Scope []string - - noSmithyDocumentSerde -} - -type CreateTokenOutput struct { - - // A bearer token to access AWS accounts and applications assigned to a user. 
- AccessToken *string - - // Indicates the time in seconds when an access token will expire. - ExpiresIn int32 - - // The idToken is not implemented or supported. For more information about the - // features and limitations of the current IAM Identity Center OIDC implementation, - // see Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . A JSON Web Token (JWT) that identifies who is associated with the issued - // access token. - IdToken *string - - // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. For more information about the features and - // limitations of the current IAM Identity Center OIDC implementation, see - // Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . - RefreshToken *string - - // Used to notify the client that the returned token is an access token. The - // supported token type is Bearer . - TokenType *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateToken{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateToken{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateToken"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpCreateTokenValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func 
newServiceMetadataMiddleware_opCreateToken(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateToken", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go deleted file mode 100644 index 6340953894..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go +++ /dev/null @@ -1,229 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates and returns access and refresh tokens for clients and applications that -// are authenticated using IAM entities. The access token can be used to fetch -// short-term credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. -func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) { - if params == nil { - params = &CreateTokenWithIAMInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateTokenWithIAM", params, optFns, c.addOperationCreateTokenWithIAMMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateTokenWithIAMOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateTokenWithIAMInput struct { - - // The unique identifier string for the client or application. This value is an - // application ARN that has OAuth grants configured. - // - // This member is required. - ClientId *string - - // Supports the following OAuth grant types: Authorization Code, Refresh Token, - // JWT Bearer, and Token Exchange. Specify one of the following values, depending - // on the grant type that you want: * Authorization Code - authorization_code * - // Refresh Token - refresh_token * JWT Bearer - - // urn:ietf:params:oauth:grant-type:jwt-bearer * Token Exchange - - // urn:ietf:params:oauth:grant-type:token-exchange - // - // This member is required. - GrantType *string - - // Used only when calling this API for the JWT Bearer grant type. This value - // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To - // authorize a trusted token issuer, configure the JWT Bearer GrantOptions for the - // application. - Assertion *string - - // Used only when calling this API for the Authorization Code grant type. This - // short-term code is used to identify this authorization request. The code is - // obtained through a redirect from IAM Identity Center to a redirect URI persisted - // in the Authorization Code GrantOptions for the application. - Code *string - - // Used only when calling this API for the Authorization Code grant type. This - // value specifies the location of the client or application that has registered to - // receive the authorization code. - RedirectUri *string - - // Used only when calling this API for the Refresh Token grant type. This token is - // used to refresh short-term tokens, such as the access token, that might expire. 
- // For more information about the features and limitations of the current IAM - // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . - RefreshToken *string - - // Used only when calling this API for the Token Exchange grant type. This value - // specifies the type of token that the requester can receive. The following values - // are supported: * Access Token - urn:ietf:params:oauth:token-type:access_token * - // Refresh Token - urn:ietf:params:oauth:token-type:refresh_token - RequestedTokenType *string - - // The list of scopes for which authorization is requested. The access token that - // is issued is limited to the scopes that are granted. If the value is not - // specified, IAM Identity Center authorizes all scopes configured for the - // application, including the following default scopes: openid , aws , - // sts:identity_context . - Scope []string - - // Used only when calling this API for the Token Exchange grant type. This value - // specifies the subject of the exchange. The value of the subject token must be an - // access token issued by IAM Identity Center to a different client or application. - // The access token must have authorized scopes that indicate the requested - // application as a target audience. - SubjectToken *string - - // Used only when calling this API for the Token Exchange grant type. This value - // specifies the type of token that is passed as the subject of the exchange. The - // following value is supported: * Access Token - - // urn:ietf:params:oauth:token-type:access_token - SubjectTokenType *string - - noSmithyDocumentSerde -} - -type CreateTokenWithIAMOutput struct { - - // A bearer token to access AWS accounts and applications assigned to a user. - AccessToken *string - - // Indicates the time in seconds when an access token will expire. - ExpiresIn int32 - - // A JSON Web Token (JWT) that identifies the user associated with the issued - // access token. - IdToken *string - - // Indicates the type of tokens that are issued by IAM Identity Center. The - // following values are supported: * Access Token - - // urn:ietf:params:oauth:token-type:access_token * Refresh Token - - // urn:ietf:params:oauth:token-type:refresh_token - IssuedTokenType *string - - // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. For more information about the features and - // limitations of the current IAM Identity Center OIDC implementation, see - // Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . - RefreshToken *string - - // The list of scopes for which authorization is granted. The access token that is - // issued is limited to the scopes that are granted. - Scope []string - - // Used to notify the requester that the returned token is an access token. The - // supported token type is Bearer . - TokenType *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateTokenWithIAM{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateTokenWithIAM{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateTokenWithIAM"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTokenWithIAM(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opCreateTokenWithIAM(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateTokenWithIAM", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go deleted file mode 100644 index 09f016ec1e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ /dev/null @@ -1,161 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Registers a client with IAM Identity Center. This allows clients to initiate -// device authorization. The output should be persisted for reuse through many -// authentication requests. 
-func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) { - if params == nil { - params = &RegisterClientInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "RegisterClient", params, optFns, c.addOperationRegisterClientMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*RegisterClientOutput) - out.ResultMetadata = metadata - return out, nil -} - -type RegisterClientInput struct { - - // The friendly name of the client. - // - // This member is required. - ClientName *string - - // The type of client. The service supports only public as a client type. Anything - // other than public will be rejected by the service. - // - // This member is required. - ClientType *string - - // The list of scopes that are defined by the client. Upon authorization, this - // list is used to restrict permissions when granting an access token. - Scopes []string - - noSmithyDocumentSerde -} - -type RegisterClientOutput struct { - - // An endpoint that the client can use to request authorization. - AuthorizationEndpoint *string - - // The unique identifier string for each client. This client uses this identifier - // to get authenticated by the service in subsequent calls. - ClientId *string - - // Indicates the time at which the clientId and clientSecret were issued. - ClientIdIssuedAt int64 - - // A secret string generated for the client. The client will use this string to - // get authenticated by the service in subsequent calls. - ClientSecret *string - - // Indicates the time at which the clientId and clientSecret will become invalid. - ClientSecretExpiresAt int64 - - // An endpoint that the client can use to create tokens. - TokenEndpoint *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpRegisterClient{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRegisterClient{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "RegisterClient"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpRegisterClientValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opRegisterClient(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "RegisterClient", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go deleted file mode 100644 index c568805b22..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go +++ /dev/null @@ -1,169 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Initiates device authorization by requesting a pair of verification codes from -// the authorization service. 
-func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDeviceAuthorizationInput, optFns ...func(*Options)) (*StartDeviceAuthorizationOutput, error) { - if params == nil { - params = &StartDeviceAuthorizationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "StartDeviceAuthorization", params, optFns, c.addOperationStartDeviceAuthorizationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*StartDeviceAuthorizationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type StartDeviceAuthorizationInput struct { - - // The unique identifier string for the client that is registered with IAM - // Identity Center. This value should come from the persisted result of the - // RegisterClient API operation. - // - // This member is required. - ClientId *string - - // A secret string that is generated for the client. This value should come from - // the persisted result of the RegisterClient API operation. - // - // This member is required. - ClientSecret *string - - // The URL for the Amazon Web Services access portal. For more information, see - // Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) - // in the IAM Identity Center User Guide. - // - // This member is required. - StartUrl *string - - noSmithyDocumentSerde -} - -type StartDeviceAuthorizationOutput struct { - - // The short-lived code that is used by the device when polling for a session - // token. - DeviceCode *string - - // Indicates the number of seconds in which the verification code will become - // invalid. - ExpiresIn int32 - - // Indicates the number of seconds the client must wait between attempts when - // polling for a session. - Interval int32 - - // A one-time user verification code. This is needed to authorize an in-use device. - UserCode *string - - // The URI of the verification page that takes the userCode to authorize the - // device. - VerificationUri *string - - // An alternate URL that the client can use to automatically launch a browser. - // This process skips the manual step in which the user visits the verification - // page and enters their code. - VerificationUriComplete *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpStartDeviceAuthorization{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartDeviceAuthorization{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "StartDeviceAuthorization"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opStartDeviceAuthorization(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "StartDeviceAuthorization", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go deleted file mode 100644 index 40b3becb9f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go +++ /dev/null @@ -1,302 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
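[Editorial note, not part of the diff: taken together, the three deleted operations above — RegisterClient, StartDeviceAuthorization, and CreateToken — implement the OAuth 2.0 device authorization flow. A hedged sketch of how a caller strings them together; the start URL is hypothetical and error handling is abbreviated:]

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := ssooidc.NewFromConfig(cfg)

	// 1. Register a public client; per the docs above, the output should be
	// persisted and reused across authentication requests.
	reg, err := client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
		ClientName: aws.String("example-cli"),
		ClientType: aws.String("public"), // only "public" is supported
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Start device authorization against the access portal URL.
	da, err := client.StartDeviceAuthorization(ctx, &ssooidc.StartDeviceAuthorizationInput{
		ClientId:     reg.ClientId,
		ClientSecret: reg.ClientSecret,
		StartUrl:     aws.String("https://example.awsapps.com/start"), // hypothetical portal URL
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("visit %s and enter code %s\n",
		aws.ToString(da.VerificationUri), aws.ToString(da.UserCode))

	// 3. Poll CreateToken at the server-provided interval until the user
	// approves the device.
	for {
		time.Sleep(time.Duration(da.Interval) * time.Second)
		tok, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{
			ClientId:     reg.ClientId,
			ClientSecret: reg.ClientSecret,
			GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
			DeviceCode:   da.DeviceCode,
		})
		if err != nil {
			// A real caller would keep polling only while authorization is
			// still pending and stop on any other error.
			continue
		}
		fmt.Println("token type:", aws.ToString(tok.TokenType))
		break
	}
}
```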
- -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The region in which the operation is being invoked. - Region string -} - -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthParamsRegion(params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. 
-type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ - "CreateToken": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "RegisterClient": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "StartDeviceAuthorization": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, -} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "sso-oauth") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - for _, option := range options { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { - return &resolvedAuthScheme{ - Scheme: scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx 
context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go deleted file mode 100644 index 76a1160ece..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go +++ /dev/null @@ -1,2066 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
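The generated auth.go removed just above chains three finalize-step middlewares (ResolveAuthScheme, GetIdentity, Signing) that pass state to one another through middleware stack values carried on the context. Below is a minimal, dependency-free sketch of that hand-off pattern; all names in it are illustrative stand-ins, not the SDK's actual API.

```go
// Sketch of the finalize-middleware pattern in the deleted auth.go: one step
// resolves an auth scheme, the next fetches an identity, and the last signs
// the request, with state flowing between steps via context values (mirroring
// middleware.WithStackValue). Illustrative only; not the smithy-go API.
package main

import (
	"context"
	"fmt"
)

type ctxKey string

// finalizeFunc stands in for smithy's FinalizeHandler chain.
type finalizeFunc func(ctx context.Context, req string) error

func resolveAuthScheme(next finalizeFunc) finalizeFunc {
	return func(ctx context.Context, req string) error {
		// Per-operation overrides would be consulted first (the generated
		// code returns anonymous auth for CreateToken, RegisterClient, and
		// StartDeviceAuthorization), then the service default of sigv4.
		ctx = context.WithValue(ctx, ctxKey("scheme"), "aws.auth#sigv4")
		return next(ctx, req)
	}
}

func getIdentity(next finalizeFunc) finalizeFunc {
	return func(ctx context.Context, req string) error {
		if ctx.Value(ctxKey("scheme")) == nil {
			return fmt.Errorf("no resolved auth scheme")
		}
		ctx = context.WithValue(ctx, ctxKey("identity"), "example-credentials")
		return next(ctx, req)
	}
}

func signRequest(next finalizeFunc) finalizeFunc {
	return func(ctx context.Context, req string) error {
		if ctx.Value(ctxKey("identity")) == nil {
			return fmt.Errorf("no identity")
		}
		fmt.Printf("signed %q with %v via %v\n",
			req, ctx.Value(ctxKey("identity")), ctx.Value(ctxKey("scheme")))
		return next(ctx, req)
	}
}

func main() {
	terminal := func(ctx context.Context, req string) error { return nil }
	// Compose in the same order the generated stack runs the steps.
	chain := resolveAuthScheme(getIdentity(signRequest(terminal)))
	if err := chain(context.Background(), "POST /token"); err != nil {
		fmt.Println("error:", err)
	}
}
```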
- -package ssooidc - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" - "github.com/aws/aws-sdk-go-v2/service/ssooidc/types" - smithy "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "strings" -) - -type awsRestjson1_deserializeOpCreateToken struct { -} - -func (*awsRestjson1_deserializeOpCreateToken) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorCreateToken(response, &metadata) - } - output := &CreateTokenOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentCreateTokenOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("AccessDeniedException", errorCode): 
- return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) - - case strings.EqualFold("AuthorizationPendingException", errorCode): - return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) - - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("InvalidClientException", errorCode): - return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) - - case strings.EqualFold("InvalidGrantException", errorCode): - return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) - - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("InvalidScopeException", errorCode): - return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) - - case strings.EqualFold("SlowDownException", errorCode): - return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) - - case strings.EqualFold("UnauthorizedClientException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) - - case strings.EqualFold("UnsupportedGrantTypeException", errorCode): - return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentCreateTokenOutput(v **CreateTokenOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *CreateTokenOutput - if *v == nil { - sv = &CreateTokenOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accessToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) - } - sv.AccessToken = ptr.String(jtv) - } - - case "expiresIn": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ExpiresIn = int32(i64) - } - - case "idToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) - } - sv.IdToken = ptr.String(jtv) - } - - case "refreshToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) - } - sv.RefreshToken = ptr.String(jtv) - } - - case "tokenType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TokenType to be of type string, got %T instead", value) - } - sv.TokenType = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpCreateTokenWithIAM struct { -} - -func (*awsRestjson1_deserializeOpCreateTokenWithIAM) ID() string { - return "OperationDeserializer" -} 
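Each awsRestjson1_deserializeOpError* function above applies the same precedence when classifying a failed response: the X-Amzn-ErrorType header wins over any code found in the JSON body, both are sanitized before comparison, and unrecognized codes fall back to a smithy.GenericAPIError. A small sketch of that precedence, with sanitize as a simplified stand-in for restjson.SanitizeErrorCode:

```go
// Error-code precedence used by the generated error deserializers: header
// code first, then the body's code, otherwise "UnknownError".
package main

import (
	"fmt"
	"strings"
)

// sanitize approximates restjson.SanitizeErrorCode: drop any ":details"
// suffix and any "namespace#" prefix from the raw code.
func sanitize(code string) string {
	if i := strings.Index(code, ":"); i >= 0 {
		code = code[:i]
	}
	if i := strings.Index(code, "#"); i >= 0 {
		code = code[i+1:]
	}
	return code
}

func resolveErrorCode(headerCode, jsonCode string) string {
	errorCode := "UnknownError"
	if headerCode != "" {
		errorCode = sanitize(headerCode)
	} else if jsonCode != "" {
		errorCode = sanitize(jsonCode)
	}
	return errorCode
}

func main() {
	fmt.Println(resolveErrorCode("SlowDownException", "InvalidGrantException")) // SlowDownException
	fmt.Println(resolveErrorCode("", "InvalidGrantException:details"))          // InvalidGrantException
	fmt.Println(resolveErrorCode("", ""))                                       // UnknownError
}
```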
- -func (m *awsRestjson1_deserializeOpCreateTokenWithIAM) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response, &metadata) - } - output := &CreateTokenWithIAMOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("AccessDeniedException", errorCode): - return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) - - case strings.EqualFold("AuthorizationPendingException", errorCode): - return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) - - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("InvalidClientException", 
errorCode): - return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) - - case strings.EqualFold("InvalidGrantException", errorCode): - return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) - - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("InvalidRequestRegionException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestRegionException(response, errorBody) - - case strings.EqualFold("InvalidScopeException", errorCode): - return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) - - case strings.EqualFold("SlowDownException", errorCode): - return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) - - case strings.EqualFold("UnauthorizedClientException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) - - case strings.EqualFold("UnsupportedGrantTypeException", errorCode): - return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(v **CreateTokenWithIAMOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *CreateTokenWithIAMOutput - if *v == nil { - sv = &CreateTokenWithIAMOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accessToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) - } - sv.AccessToken = ptr.String(jtv) - } - - case "expiresIn": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ExpiresIn = int32(i64) - } - - case "idToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) - } - sv.IdToken = ptr.String(jtv) - } - - case "issuedTokenType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TokenTypeURI to be of type string, got %T instead", value) - } - sv.IssuedTokenType = ptr.String(jtv) - } - - case "refreshToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) - } - sv.RefreshToken = ptr.String(jtv) - } - - case "scope": - if err := awsRestjson1_deserializeDocumentScopes(&sv.Scope, value); err != nil { - return err - } - - case "tokenType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TokenType to be of type string, got %T instead", value) - } - sv.TokenType = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpRegisterClient struct { -} - -func (*awsRestjson1_deserializeOpRegisterClient) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpRegisterClient) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorRegisterClient(response, &metadata) - } - output := &RegisterClientOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentRegisterClientOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("InvalidClientMetadataException", errorCode): - return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody) - - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("InvalidScopeException", errorCode): - return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError 
- - } -} - -func awsRestjson1_deserializeOpDocumentRegisterClientOutput(v **RegisterClientOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *RegisterClientOutput - if *v == nil { - sv = &RegisterClientOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "authorizationEndpoint": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected URI to be of type string, got %T instead", value) - } - sv.AuthorizationEndpoint = ptr.String(jtv) - } - - case "clientId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ClientId to be of type string, got %T instead", value) - } - sv.ClientId = ptr.String(jtv) - } - - case "clientIdIssuedAt": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ClientIdIssuedAt = i64 - } - - case "clientSecret": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ClientSecret to be of type string, got %T instead", value) - } - sv.ClientSecret = ptr.String(jtv) - } - - case "clientSecretExpiresAt": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ClientSecretExpiresAt = i64 - } - - case "tokenEndpoint": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected URI to be of type string, got %T instead", value) - } - sv.TokenEndpoint = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpStartDeviceAuthorization struct { -} - -func (*awsRestjson1_deserializeOpStartDeviceAuthorization) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response, &metadata) - } - output := &StartDeviceAuthorizationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = 
awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("InvalidClientException", errorCode): - return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) - - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("SlowDownException", errorCode): - return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) - - case strings.EqualFold("UnauthorizedClientException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(v **StartDeviceAuthorizationOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *StartDeviceAuthorizationOutput - if *v == nil { - sv = &StartDeviceAuthorizationOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "deviceCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected DeviceCode to be of type string, got %T instead", value) - } - sv.DeviceCode = ptr.String(jtv) - } - - case "expiresIn": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ExpiresIn = int32(i64) - } - - case "interval": - if 
value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected IntervalInSeconds to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.Interval = int32(i64) - } - - case "userCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UserCode to be of type string, got %T instead", value) - } - sv.UserCode = ptr.String(jtv) - } - - case "verificationUri": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected URI to be of type string, got %T instead", value) - } - sv.VerificationUri = ptr.String(jtv) - } - - case "verificationUriComplete": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected URI to be of type string, got %T instead", value) - } - sv.VerificationUriComplete = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.AccessDeniedException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorAuthorizationPendingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.AuthorizationPendingException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentAuthorizationPendingException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ExpiredTokenException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: 
fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentExpiredTokenException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InternalServerException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidClientException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidClientException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidClientMetadataException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidClientMetadataException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidClientMetadataException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - 
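Every per-error deserializer above tees the body into a bounded ring buffer while decoding, so a snapshot of the raw payload can be attached to a smithy.DeserializationError when decoding fails. A stdlib-only sketch of the same idea, with bytes.Buffer standing in for smithyio.RingBuffer (which additionally caps the snapshot at its buffer size):

```go
// Tee-and-snapshot decoding, as in the generated deserializers: read the
// body through a TeeReader so a copy of what was consumed is available if
// JSON decoding fails partway through.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func decodeWithSnapshot(body io.Reader, out interface{}) error {
	var ring bytes.Buffer // smithyio.RingBuffer would cap this at 1KB
	tee := io.TeeReader(body, &ring)

	dec := json.NewDecoder(tee)
	dec.UseNumber() // keep numbers as json.Number, as the generated code does
	if err := dec.Decode(out); err != nil && err != io.EOF {
		return fmt.Errorf("failed to decode response body, %w (snapshot: %q)",
			err, ring.Bytes())
	}
	return nil
}

func main() {
	var shape interface{}
	// Truncated JSON triggers the error path and shows the captured snapshot.
	err := decodeWithSnapshot(strings.NewReader(`{"error": "slow_down"`), &shape)
	fmt.Println(err)
}
```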
-func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidGrantException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidGrantException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRequestException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidRequestRegionException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRequestRegionException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidRequestRegionException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidScopeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidScopeException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, 
ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidScopeException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorSlowDownException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.SlowDownException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentSlowDownException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorUnauthorizedClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.UnauthorizedClientException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentUnauthorizedClientException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.UnsupportedGrantTypeException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - 
errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AccessDeniedException - if *v == nil { - sv = &types.AccessDeniedException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentAuthorizationPendingException(v **types.AuthorizationPendingException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AuthorizationPendingException - if *v == nil { - sv = &types.AuthorizationPendingException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ExpiredTokenException - if *v == nil { - sv = &types.ExpiredTokenException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InternalServerException - if *v == nil { - sv = 
&types.InternalServerException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidClientException(v **types.InvalidClientException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidClientException - if *v == nil { - sv = &types.InvalidClientException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidClientMetadataException(v **types.InvalidClientMetadataException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidClientMetadataException - if *v == nil { - sv = &types.InvalidClientMetadataException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGrantException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidGrantException - if *v == nil { - sv = &types.InvalidGrantException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T 
instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidRequestException - if *v == nil { - sv = &types.InvalidRequestException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidRequestRegionException(v **types.InvalidRequestRegionException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidRequestRegionException - if *v == nil { - sv = &types.InvalidRequestRegionException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "endpoint": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Location to be of type string, got %T instead", value) - } - sv.Endpoint = ptr.String(jtv) - } - - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - case "region": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Region to be of type string, got %T instead", value) - } - sv.Region = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScopeException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidScopeException - if *v == nil { - sv = &types.InvalidScopeException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - 
- default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentScopes(v *[]string, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []string - if *v == nil { - cv = []string{} - } else { - cv = *v - } - - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Scope to be of type string, got %T instead", value) - } - col = jtv - } - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsRestjson1_deserializeDocumentSlowDownException(v **types.SlowDownException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.SlowDownException - if *v == nil { - sv = &types.SlowDownException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentUnauthorizedClientException(v **types.UnauthorizedClientException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnauthorizedClientException - if *v == nil { - sv = &types.UnauthorizedClientException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(v **types.UnsupportedGrantTypeException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnsupportedGrantTypeException - if *v == nil { - sv = &types.UnsupportedGrantTypeException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - 
jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go deleted file mode 100644 index 53cd4f55a0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go +++ /dev/null @@ -1,38 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -// Package ssooidc provides the API client, operations, and parameter types for -// AWS SSO OIDC. -// -// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a -// client (such as CLI or a native application) to register with IAM Identity -// Center. The service also enables the client to fetch the user’s access token -// upon successful authentication and authorization with IAM Identity Center. IAM -// Identity Center uses the sso and identitystore API namespaces. Considerations -// for Using This Guide Before you begin using this guide, we recommend that you -// first review the following important information about how the IAM Identity -// Center OIDC service works. -// - The IAM Identity Center OIDC service currently implements only the portions -// of the OAuth 2.0 Device Authorization Grant standard ( -// https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628) ) -// that are necessary to enable single sign-on authentication with the CLI. -// - With older versions of the CLI, the service only emits OIDC access tokens, -// so to obtain a new token, users must explicitly re-authenticate. To access the -// OIDC flow that supports token refresh and doesn’t require re-authentication, -// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with -// support for OIDC token refresh and configurable IAM Identity Center session -// durations. For more information, see Configure Amazon Web Services access -// portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html) -// . -// - The access tokens provided by this service grant access to all Amazon Web -// Services account entitlements assigned to an IAM Identity Center user, not just -// a particular application. -// - The documentation in this guide does not describe the mechanism to convert -// the access token into Amazon Web Services Auth (“sigv4”) credentials for use -// with IAM-protected Amazon Web Services service endpoints. For more information, -// see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) -// in the IAM Identity Center Portal API Reference Guide. -// -// For general information about IAM Identity Center, see What is IAM Identity -// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) -// in the IAM Identity Center User Guide. -package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go deleted file mode 100644 index 94e835e711..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go +++ /dev/null @@ -1,535 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
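The deleted doc.go above documents that ssooidc implements the parts of the OAuth 2.0 Device Authorization Grant (RFC 8628) needed for CLI single sign-on. A sketch of that flow end to end with the regenerated client follows; the input field names are inferred from the output shapes visible in this diff and from the public SDK, so treat them as assumptions, and the start URL is a placeholder.

```go
// Device-authorization flow against IAM Identity Center: register a public
// client, start device authorization, then poll CreateToken until the user
// approves. AuthorizationPending and SlowDown are the retryable errors
// handled by the deserializers in this diff.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		panic(err)
	}
	client := ssooidc.NewFromConfig(cfg)

	// 1. Register this tool as a public OIDC client. Note this operation
	//    uses anonymous auth, per operationAuthOptions in the generated
	//    auth.go.
	reg, err := client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
		ClientName: aws.String("example-cli"),
		ClientType: aws.String("public"),
	})
	if err != nil {
		panic(err)
	}

	// 2. Start device authorization against the access portal start URL
	//    (placeholder value).
	auth, err := client.StartDeviceAuthorization(ctx, &ssooidc.StartDeviceAuthorizationInput{
		ClientId:     reg.ClientId,
		ClientSecret: reg.ClientSecret,
		StartUrl:     aws.String("https://example.awsapps.com/start"),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("visit:", aws.ToString(auth.VerificationUriComplete))

	// 3. Poll CreateToken at the server-suggested interval.
	interval := time.Duration(auth.Interval) * time.Second
	for {
		tok, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{
			ClientId:     reg.ClientId,
			ClientSecret: reg.ClientSecret,
			DeviceCode:   auth.DeviceCode,
			GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
		})
		var pending *types.AuthorizationPendingException
		var slow *types.SlowDownException
		switch {
		case err == nil:
			fmt.Println("token expires in", tok.ExpiresIn, "seconds")
			return
		case errors.As(err, &pending):
			time.Sleep(interval)
		case errors.As(err, &slow):
			interval += 5 * time.Second // RFC 8628 slow_down back-off
			time.Sleep(interval)
		default:
			panic(err)
		}
	}
}
```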
- -package ssooidc - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom.You can provide functional options to configure endpoint -// values for the resolved endpoint. 
-func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "sso-oauth" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
-// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO_OIDC") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO OIDC", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. - // - // AWS::UseDualStack - UseDualStack *bool - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string -} - -// ValidateRequired validates required parameters are set. 
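The resolution code in this file honors the FIPS/dual-stack flags and the AWS_ENDPOINT_URL_SSO_OIDC service-specific override visible in resolveBaseEndpoint above. A minimal sketch of flipping those switches through client options (field names as in the Options struct later in this diff):

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

// newFIPSClient enables FIPS and dual-stack resolution, so the rules below
// resolve hosts like oidc-fips.{region}.api.aws. Note that a region spelled
// "fips-us-east-1" is also normalized by finalizeClientEndpointResolverOptions
// above: the fips affix is stripped and UseFIPSEndpoint is forced on.
func newFIPSClient(cfg aws.Config) *ssooidc.Client {
	return ssooidc.NewFromConfig(cfg, func(o *ssooidc.Options) {
		o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
		o.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateEnabled
	})
}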
-func (p EndpointParameters) ValidateRequired() error { - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. -func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - return p -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseDualStack := *params.UseDualStack - _UseFIPS := *params.UseFIPS - - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") - } - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") - } - uriString := _Endpoint - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == true { - if _UseDualStack == true { - if true == _PartitionResult.SupportsFIPS { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") - } - } - if _UseFIPS == true { - if _PartitionResult.SupportsFIPS == true { - if _PartitionResult.Name == "aws-us-gov" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return 
out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") - } - if _UseDualStack == true { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json deleted file mode 100644 index b2a52633ba..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_CreateToken.go", - "api_op_CreateTokenWithIAM.go", - "api_op_RegisterClient.go", - "api_op_StartDeviceAuthorization.go", - "auth.go", - "deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - 
"protocol_test.go", - "serializers.go", - "snapshot_test.go", - "types/errors.go", - "types/types.go", - "validators.go" - ], - "go": "1.15", - "module": "github.com/aws/aws-sdk-go-v2/service/ssooidc", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go deleted file mode 100644 index e81f202fd8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package ssooidc - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.23.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go deleted file mode 100644 index aa20725343..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go +++ /dev/null @@ -1,542 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package endpoints - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. - UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
- UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver SSO OIDC endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "oidc.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "oidc-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "af-south-1", - }: endpoints.Endpoint{ - Hostname: "oidc.af-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "af-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-east-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-northeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - }: endpoints.Endpoint{ - Hostname: 
"oidc.ap-northeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-northeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-south-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-4.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-4", - }, - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ca-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-1", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-2", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-central-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-north-1", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-north-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-south-1", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-1", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-3", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-west-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-3", - }, - }, - endpoints.EndpointKey{ - Region: "il-central-1", - }: endpoints.Endpoint{ - Hostname: "oidc.il-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "il-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-central-1", - }: endpoints.Endpoint{ - Hostname: "oidc.me-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-south-1", - }: endpoints.Endpoint{ - Hostname: "oidc.me-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - 
Region: "me-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "sa-east-1", - }: endpoints.Endpoint{ - Hostname: "oidc.sa-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "sa-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{ - Hostname: "oidc.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - }: endpoints.Endpoint{ - Hostname: "oidc.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - }: endpoints.Endpoint{ - Hostname: "oidc.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{ - Hostname: "oidc.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "oidc.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "oidc-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "cn-north-1", - }: endpoints.Endpoint{ - Hostname: "oidc.cn-north-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - }: endpoints.Endpoint{ - Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - }, - { - ID: "aws-iso-e", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: 
"oidc.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - }, - { - ID: "aws-us-gov", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "oidc.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "oidc-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "oidc.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "oidc.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go deleted file mode 100644 index b964e7e109..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go +++ /dev/null @@ -1,217 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. 
- ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. 
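The Retryer, RetryMode, and RetryMaxAttempts fields above interact as documented: a non-nil Retryer wins, otherwise RetryMode selects the default retryer, and RetryMaxAttempts caps its attempts. A short sketch of tuning them:

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

// newRetryTunedClient raises the attempt cap and opts into adaptive retries.
func newRetryTunedClient(cfg aws.Config) *ssooidc.Client {
	return ssooidc.NewFromConfig(cfg, func(o *ssooidc.Options) {
		o.RetryMaxAttempts = 5
		o.RetryMode = aws.RetryModeAdaptive
	})
}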
- AuthSchemes []smithyhttp.AuthScheme -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. 
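The functional options defined in this file compose at construction time, and the SigV4 overrides are FINAL per their doc comments. A sketch using the signing name "sso-oauth" that appears as the fallback in endpoints.go earlier in this diff:

package example

import "github.com/aws/aws-sdk-go-v2/service/ssooidc"

// newPinnedSigningClient pins both the signing name and region, overriding
// whatever auth scheme and endpoint resolution would otherwise produce.
func newPinnedSigningClient() *ssooidc.Client {
	return ssooidc.New(ssooidc.Options{Region: "us-east-1"},
		ssooidc.WithSigV4SigningName("sso-oauth"),
		ssooidc.WithSigV4SigningRegion("us-east-1"),
	)
}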
-func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go deleted file mode 100644 index 754218b78e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go +++ /dev/null @@ -1,431 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "bytes" - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - smithyjson "github.com/aws/smithy-go/encoding/json" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -type awsRestjson1_serializeOpCreateToken struct { -} - -func (*awsRestjson1_serializeOpCreateToken) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateTokenInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/token") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/json") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentCreateTokenInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, 
in) -} -func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - return nil -} - -func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ClientId != nil { - ok := object.Key("clientId") - ok.String(*v.ClientId) - } - - if v.ClientSecret != nil { - ok := object.Key("clientSecret") - ok.String(*v.ClientSecret) - } - - if v.Code != nil { - ok := object.Key("code") - ok.String(*v.Code) - } - - if v.DeviceCode != nil { - ok := object.Key("deviceCode") - ok.String(*v.DeviceCode) - } - - if v.GrantType != nil { - ok := object.Key("grantType") - ok.String(*v.GrantType) - } - - if v.RedirectUri != nil { - ok := object.Key("redirectUri") - ok.String(*v.RedirectUri) - } - - if v.RefreshToken != nil { - ok := object.Key("refreshToken") - ok.String(*v.RefreshToken) - } - - if v.Scope != nil { - ok := object.Key("scope") - if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { - return err - } - } - - return nil -} - -type awsRestjson1_serializeOpCreateTokenWithIAM struct { -} - -func (*awsRestjson1_serializeOpCreateTokenWithIAM) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpCreateTokenWithIAM) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateTokenWithIAMInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/token?aws_iam=t") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/json") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } 
- - return nil -} - -func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Assertion != nil { - ok := object.Key("assertion") - ok.String(*v.Assertion) - } - - if v.ClientId != nil { - ok := object.Key("clientId") - ok.String(*v.ClientId) - } - - if v.Code != nil { - ok := object.Key("code") - ok.String(*v.Code) - } - - if v.GrantType != nil { - ok := object.Key("grantType") - ok.String(*v.GrantType) - } - - if v.RedirectUri != nil { - ok := object.Key("redirectUri") - ok.String(*v.RedirectUri) - } - - if v.RefreshToken != nil { - ok := object.Key("refreshToken") - ok.String(*v.RefreshToken) - } - - if v.RequestedTokenType != nil { - ok := object.Key("requestedTokenType") - ok.String(*v.RequestedTokenType) - } - - if v.Scope != nil { - ok := object.Key("scope") - if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { - return err - } - } - - if v.SubjectToken != nil { - ok := object.Key("subjectToken") - ok.String(*v.SubjectToken) - } - - if v.SubjectTokenType != nil { - ok := object.Key("subjectTokenType") - ok.String(*v.SubjectTokenType) - } - - return nil -} - -type awsRestjson1_serializeOpRegisterClient struct { -} - -func (*awsRestjson1_serializeOpRegisterClient) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*RegisterClientInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/client/register") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/json") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentRegisterClientInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) 
- } - - return nil -} - -func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ClientName != nil { - ok := object.Key("clientName") - ok.String(*v.ClientName) - } - - if v.ClientType != nil { - ok := object.Key("clientType") - ok.String(*v.ClientType) - } - - if v.Scopes != nil { - ok := object.Key("scopes") - if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil { - return err - } - } - - return nil -} - -type awsRestjson1_serializeOpStartDeviceAuthorization struct { -} - -func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*StartDeviceAuthorizationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/device_authorization") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/json") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - return nil -} - -func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ClientId != nil { - ok := object.Key("clientId") - ok.String(*v.ClientId) - } - - if v.ClientSecret != nil { - ok := object.Key("clientSecret") - ok.String(*v.ClientSecret) - } - - if v.StartUrl != nil { - ok := object.Key("startUrl") - ok.String(*v.StartUrl) - } - - return nil -} - -func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error { - array := 
value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go deleted file mode 100644 index 86b62049fd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go +++ /dev/null @@ -1,398 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// You do not have sufficient access to perform this action. -type AccessDeniedException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *AccessDeniedException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *AccessDeniedException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *AccessDeniedException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "AccessDeniedException" - } - return *e.ErrorCodeOverride -} -func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that a request to authorize a client with an access user session -// token is pending. -type AuthorizationPendingException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *AuthorizationPendingException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *AuthorizationPendingException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *AuthorizationPendingException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "AuthorizationPendingException" - } - return *e.ErrorCodeOverride -} -func (e *AuthorizationPendingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the token issued by the service is expired and is no longer -// valid. -type ExpiredTokenException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *ExpiredTokenException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ExpiredTokenException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ExpiredTokenException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ExpiredTokenException" - } - return *e.ErrorCodeOverride -} -func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that an error from the service occurred while trying to process a -// request. 
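Every exception type in the errors file below satisfies the smithy.APIError interface through its ErrorCode, ErrorMessage, and ErrorFault methods, so callers can inspect failures generically before reaching for concrete types. A minimal sketch:

package example

import (
	"errors"
	"log"

	"github.com/aws/smithy-go"
)

// logAPIError reports the code, fault origin (client or server), and message
// of any smithy-modeled service error in the chain.
func logAPIError(err error) {
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		log.Printf("code=%s fault=%s msg=%s",
			apiErr.ErrorCode(), apiErr.ErrorFault(), apiErr.ErrorMessage())
	}
}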
-type InternalServerException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InternalServerException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InternalServerException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InternalServerException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InternalServerException" - } - return *e.ErrorCodeOverride -} -func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } - -// Indicates that the clientId or clientSecret in the request is invalid. For -// example, this can occur when a client sends an incorrect clientId or an expired -// clientSecret . -type InvalidClientException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidClientException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidClientException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidClientException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidClientException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the client information sent in the request during registration -// is invalid. -type InvalidClientMetadataException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidClientMetadataException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidClientMetadataException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidClientMetadataException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidClientMetadataException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that a request contains an invalid grant. This can occur if a client -// makes a CreateToken request with an invalid grant type. -type InvalidGrantException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidGrantException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidGrantException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidGrantException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidGrantException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that something is wrong with the input to the request. For example, a -// required parameter might be missing or out of range. 
-type InvalidRequestException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidRequestException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRequestException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRequestException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRequestException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that a token provided as input to the request was issued by and is -// only usable by calling IAM Identity Center endpoints in another region. -type InvalidRequestRegionException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - Endpoint *string - Region *string - - noSmithyDocumentSerde -} - -func (e *InvalidRequestRegionException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRequestRegionException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRequestRegionException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRequestRegionException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRequestRegionException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the scope provided in the request is invalid. -type InvalidScopeException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidScopeException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidScopeException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidScopeException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidScopeException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidScopeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the client is making the request too frequently and is more than -// the service can handle. -type SlowDownException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *SlowDownException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *SlowDownException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *SlowDownException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "SlowDownException" - } - return *e.ErrorCodeOverride -} -func (e *SlowDownException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the client is not currently authorized to make the request. This -// can happen when a clientId is not issued for a public client. 
-type UnauthorizedClientException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *UnauthorizedClientException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnauthorizedClientException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnauthorizedClientException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnauthorizedClientException" - } - return *e.ErrorCodeOverride -} -func (e *UnauthorizedClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the grant type in the request is not supported by the service. -type UnsupportedGrantTypeException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *UnsupportedGrantTypeException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnsupportedGrantTypeException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnsupportedGrantTypeException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnsupportedGrantTypeException" - } - return *e.ErrorCodeOverride -} -func (e *UnsupportedGrantTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go deleted file mode 100644 index 0ec0789f8d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go +++ /dev/null @@ -1,9 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" -) - -type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go deleted file mode 100644 index 9c17e4c8e1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go +++ /dev/null @@ -1,184 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
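The deleted errors.go and types.go above model each OIDC error as its own Go type, so callers can branch with errors.As; the validators.go that follows rejects requests with missing required members before any HTTP round trip, returning a smithy.InvalidParamsError. As a minimal sketch (not part of this diff) of how a device-authorization polling loop typically branches on these types; all input values are placeholders, and import paths use the canonical github.com module path rather than the github.com mirror shown in this diff:

package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := ssooidc.NewFromConfig(cfg)

	// Placeholder inputs; in a real flow these come from prior
	// RegisterClient and StartDeviceAuthorization calls.
	_, err = client.CreateToken(ctx, &ssooidc.CreateTokenInput{
		ClientId:     aws.String("client-id"),
		ClientSecret: aws.String("client-secret"),
		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
		DeviceCode:   aws.String("device-code"),
	})

	// The error types deleted above drive the polling loop.
	var pending *types.AuthorizationPendingException
	var slowDown *types.SlowDownException
	switch {
	case errors.As(err, &pending):
		log.Println("user has not finished authorizing; poll again")
	case errors.As(err, &slowDown):
		log.Println("polling too fast; back off before the next attempt")
	case err != nil:
		log.Fatal(err)
	}
}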
- -package ssooidc - -import ( - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpCreateToken struct { -} - -func (*validateOpCreateToken) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateTokenInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateTokenInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateTokenWithIAM struct { -} - -func (*validateOpCreateTokenWithIAM) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateTokenWithIAM) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateTokenWithIAMInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateTokenWithIAMInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpRegisterClient struct { -} - -func (*validateOpRegisterClient) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpRegisterClient) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*RegisterClientInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpRegisterClientInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpStartDeviceAuthorization struct { -} - -func (*validateOpStartDeviceAuthorization) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpStartDeviceAuthorization) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*StartDeviceAuthorizationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpStartDeviceAuthorizationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func addOpCreateTokenValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateToken{}, middleware.After) -} - -func addOpCreateTokenWithIAMValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateTokenWithIAM{}, middleware.After) -} - -func addOpRegisterClientValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpRegisterClient{}, middleware.After) -} - -func addOpStartDeviceAuthorizationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpStartDeviceAuthorization{}, middleware.After) -} - -func validateOpCreateTokenInput(v *CreateTokenInput) error { - if v == nil { - 
return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateTokenInput"} - if v.ClientId == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientId")) - } - if v.ClientSecret == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) - } - if v.GrantType == nil { - invalidParams.Add(smithy.NewErrParamRequired("GrantType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateTokenWithIAMInput(v *CreateTokenWithIAMInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateTokenWithIAMInput"} - if v.ClientId == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientId")) - } - if v.GrantType == nil { - invalidParams.Add(smithy.NewErrParamRequired("GrantType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpRegisterClientInput(v *RegisterClientInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RegisterClientInput"} - if v.ClientName == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientName")) - } - if v.ClientType == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "StartDeviceAuthorizationInput"} - if v.ClientId == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientId")) - } - if v.ClientSecret == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) - } - if v.StartUrl == nil { - invalidParams.Add(smithy.NewErrParamRequired("StartUrl")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md deleted file mode 100644 index 1c50319455..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ /dev/null @@ -1,435 +0,0 @@ -# v1.28.5 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.4 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.3 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.2 (2024-03-04) - -* **Bug Fix**: Update internal/presigned-url dependency for corrected API name. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.27.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.1 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. 
This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.27.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.7 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.6 (2023-12-20) - -* No change notes available for this release. - -# v1.26.5 (2023-12-08) - -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. - -# v1.26.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. -* **Bug Fix**: STS `AssumeRoleWithSAML` and `AssumeRoleWithWebIdentity` would incorrectly attempt to use SigV4 authentication. - -# v1.26.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. -* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Documentation**: Documentation updates for AWS Security Token Service. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.6 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.5 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.25.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.3 (2023-11-17) - -* **Documentation**: API updates for the AWS Security Token Service - -# v1.25.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.2 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.1 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2023-10-02) - -* **Feature**: STS API updates for assumeRole - -# v1.22.0 (2023-09-18) - -* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and makes region not required in SDK::Endpoint. Additional breakfix to cognito-sync field.
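The v1.25.0 entry above covers endpoint overrides resolved from the environment and shared configuration, and the v1.21.0 entry below introduces BaseEndpoint as the programmatic equivalent. A hedged sketch of overriding the STS endpoint at construction time; the URL is a placeholder, and the environment-variable spelling is an assumption based on the SDK's service-specific AWS_ENDPOINT_URL_* convention:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	// The same override can come from the environment, e.g.
	// AWS_ENDPOINT_URL_STS=https://sts.example.test (assumed spelling).
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// BaseEndpoint takes precedence over the default resolver.
	_ = sts.NewFromConfig(cfg, func(o *sts.Options) {
		o.BaseEndpoint = aws.String("https://sts.example.test")
	})
}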
- -# v1.21.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.1 (2023-08-01) - -* No change notes available for this release. - -# v1.21.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.1 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2023-07-25) - -* **Feature**: API updates for the AWS Security Token Service - -# v1.19.3 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.2 (2023-06-15) - -* No change notes available for this release. - -# v1.19.1 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.0 (2023-05-08) - -* **Feature**: Documentation updates for AWS Security Token Service. - -# v1.18.11 (2023-05-04) - -* No change notes available for this release. - -# v1.18.10 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.9 (2023-04-10) - -* No change notes available for this release. - -# v1.18.8 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.7 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.6 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.5 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. - -# v1.18.4 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. - -# v1.18.2 (2023-01-25) - -* **Documentation**: Doc only change to update wording in a key topic - -# v1.18.1 (2023-01-23) - -* No change notes available for this release. - -# v1.18.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - -# v1.17.7 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2022-11-22) - -* No change notes available for this release. - -# v1.17.4 (2022-11-17) - -* **Documentation**: Documentation updates for AWS Security Token Service. - -# v1.17.3 (2022-11-16) - -* No change notes available for this release. - -# v1.17.2 (2022-11-10) - -* No change notes available for this release.
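The v1.15.0 entry further below describes the Adaptive retry mode and the RetryMode/RetryMaxAttempts client options. A minimal sketch of opting in via shared-config loading; the attempt count is illustrative, not a recommendation:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	// Adaptive mode adds client-side rate limiting when throttled
	// responses are received from an API.
	cfg, err := config.LoadDefaultConfig(context.Background(),
		config.WithRetryMode(aws.RetryModeAdaptive),
		config.WithRetryMaxAttempts(5),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = sts.NewFromConfig(cfg)
}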
- -# v1.17.1 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2022-10-21) - -* **Feature**: Add presign functionality for sts:AssumeRole operation -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.19 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.18 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.17 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.16 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.15 (2022-08-30) - -* No change notes available for this release. - -# v1.16.14 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.13 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.12 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.11 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.10 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.9 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.8 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.7 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2022-05-16) - -* **Documentation**: Documentation updates for AWS Security Token Service. - -# v1.16.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Documentation**: Updated service client model to latest release. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2021-12-21) - -* **Feature**: Updated to latest service endpoints - -# v1.11.1 (2021-12-02) - -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2021-11-30) - -* **Feature**: API client updated - -# v1.10.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2021-11-12) - -* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. - -# v1.9.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-10-21) - -* **Feature**: API client updated -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.2 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.2 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.1 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-07-15) - -* **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* **Documentation**: Updated service model to latest revision. -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-06-25) - -* **Feature**: API client updated -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. 
-* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go deleted file mode 100644 index 4d18dc86bd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ /dev/null @@ -1,689 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/protocol/query" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" - presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" - smithy "github.com/aws/smithy-go" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "time" -) - -const ServiceID = "STS" -const ServiceAPIVersion = "2011-06-15" - -// Client provides the API client to make operations call for AWS Security Token -// Service. -type Client struct { - options Options -} - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveEndpointResolverV2(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - wrapWithAnonymousAuth(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. 
Config overrides should instead be made on a per-operation basis through -// functional options. -func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { - ctx = middleware.ClearStackValues(ctx) - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) - if err != nil { - err = &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - return result, metadata, err -} - -type operationInputKey struct{} - -func setOperationInput(ctx context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" 
-} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - -func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, optFns...) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) 
- - default: - o.Retryer = retry.NewStandard(standardOptions...) - } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return 
stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - -func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) - -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) - -} - -// HTTPPresignerV4 represents presigner interface used by presign url client -type HTTPPresignerV4 interface { - PresignHTTP( - ctx context.Context, credentials aws.Credentials, r *http.Request, - payloadHash string, service string, region string, signingTime time.Time, - optFns ...func(*v4.SignerOptions), - ) (url string, signedHeader http.Header, err error) -} - -// PresignOptions represents the presign client options -type PresignOptions struct { - - // ClientOptions are list of functional options to mutate client options used by - // the presign client. - ClientOptions []func(*Options) - - // Presigner is the presigner used by the presign url client - Presigner HTTPPresignerV4 -} - -func (o PresignOptions) copy() PresignOptions { - clientOptions := make([]func(*Options), len(o.ClientOptions)) - copy(clientOptions, o.ClientOptions) - o.ClientOptions = clientOptions - return o -} - -// WithPresignClientFromClientOptions is a helper utility to retrieve a function -// that takes PresignOption as input -func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { - return withPresignClientFromClientOptions(optFns).options -} - -type withPresignClientFromClientOptions []func(*Options) - -func (w withPresignClientFromClientOptions) options(o *PresignOptions) { - o.ClientOptions = append(o.ClientOptions, w...) 
-} - -// PresignClient represents the presign url client -type PresignClient struct { - client *Client - options PresignOptions -} - -// NewPresignClient generates a presign client using provided API Client and -// presign options -func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { - var options PresignOptions - for _, fn := range optFns { - fn(&options) - } - if len(options.ClientOptions) != 0 { - c = New(c.options, options.ClientOptions...) - } - - if options.Presigner == nil { - options.Presigner = newDefaultV4Signer(c.options) - } - - return &PresignClient{ - client: c, - options: options, - } -} - -func withNopHTTPClientAPIOption(o *Options) { - o.HTTPClient = smithyhttp.NopClient{} -} - -type presignContextPolyfillMiddleware struct { -} - -func (*presignContextPolyfillMiddleware) ID() string { - return "presignContextPolyfill" -} - -func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - schemeID := rscheme.Scheme.SchemeID() - - if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" { - if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningName(ctx, sn) - } - if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningRegion(ctx, sr) - } - } else if schemeID == "aws.auth#sigv4a" { - if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningName(ctx, sn) - } - if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningRegion(ctx, sr[0]) - } - } - - return next.HandleFinalize(ctx, in) -} - -type presignConverter PresignOptions - -func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { - if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok { - stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) - } - if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok { - stack.Finalize.Remove((*retry.Attempt)(nil).ID()) - } - if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok { - stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID()) - } - stack.Deserialize.Clear() - stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) - stack.Build.Remove("UserAgent") - if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { - return err - } - - pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: options.Credentials, - Presigner: c.Presigner, - LogSigning: options.ClientLogMode.IsSigning(), - }) - if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { - return err - } - if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { - return err - } - // convert request to a GET request - err = query.AddAsGetRequestMiddleware(stack) - if err != nil { - return err - } - err = presignedurlcust.AddAsIsPresigningMiddleware(stack) - if err != nil { - return err - } - return nil -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return 
stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go deleted file mode 100644 index e0e2c9c2e8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ /dev/null @@ -1,439 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary security credentials that you can use to access -// Amazon Web Services resources. These temporary credentials consist of an access -// key ID, a secret access key, and a security token. Typically, you use AssumeRole -// within your account or for cross-account access. For a comparison of AssumeRole -// with other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. Permissions The temporary security credentials created by -// AssumeRole can be used to make API calls to any Amazon Web Services service -// with the following exception: You cannot call the Amazon Web Services STS -// GetFederationToken or GetSessionToken API operations. (Optional) You can pass -// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. 
The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. When you create a role, you create two policies: a role -// trust policy that specifies who can assume the role, and a permissions policy -// that specifies what can be done with the role. You specify the trusted principal -// that is allowed to assume the role in the role trust policy. To assume a role -// from a different account, your Amazon Web Services account must be trusted by -// the role. The trust relationship is defined in the role's trust policy when the -// role is created. That trust policy states which accounts are allowed to delegate -// that access to users in the account. A user who wants to access a role in a -// different account must also have permissions that are delegated from the account -// administrator. The administrator must attach a policy that allows the user to -// call AssumeRole for the ARN of the role in the other account. To allow a user -// to assume a role in the same account, you can do either of the following: -// - Attach a policy to the user that allows the user to call AssumeRole (as long -// as the role's trust policy trusts the account). -// - Add the user as a principal directly in the role's trust policy. -// -// You can do either because the role’s trust policy acts as an IAM resource-based -// policy. When a resource-based policy grants access to a principal in the same -// account, no additional identity-based policy is required. For more information -// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// in the IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your -// session. These tags are called session tags. For more information about session -// tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. An administrator must grant you the permissions necessary -// to pass session tags. The administrator can also create granular permissions to -// allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include -// multi-factor authentication (MFA) information when you call AssumeRole . This is -// useful for cross-account scenarios to ensure that the user that assumes the role -// has been authenticated with an Amazon Web Services MFA device. 
In that scenario, -// the trust policy of the role being assumed includes a condition that tests for -// MFA authentication. If the caller does not include valid MFA information, the -// request to assume the role is denied. The condition in a trust policy that tests -// for MFA authentication might look like the following example. "Condition": -// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see -// Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) -// in the IAM User Guide. To use MFA with AssumeRole , you pass values for -// the SerialNumber and TokenCode parameters. The SerialNumber value identifies -// the user's hardware or virtual MFA device. The TokenCode is the time-based -// one-time password (TOTP) that the MFA device produces. -func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { - if params == nil { - params = &AssumeRoleInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "AssumeRole", params, optFns, c.addOperationAssumeRoleMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AssumeRoleOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AssumeRoleInput struct { - - // The Amazon Resource Name (ARN) of the role to assume. - // - // This member is required. - RoleArn *string - - // An identifier for the assumed role session. Use the role session name to - // uniquely identify a session when the same role is assumed by different - // principals or for different reasons. In cross-account scenarios, the role - // session name is visible to, and can be logged by the account that owns the role. - // The role session name is also used in the ARN of the assumed role principal. - // This means that subsequent cross-account API requests that use the temporary - // security credentials will expose the role session name to the external account - // in their CloudTrail logs. The regex used to validate this parameter is a string - // of characters consisting of upper- and lower-case alphanumeric characters with - // no spaces. You can also include underscores or any of the following characters: - // =,.@- - // - // This member is required. - RoleSessionName *string - - // The duration, in seconds, of the role session. The value specified can range - // from 900 seconds (15 minutes) up to the maximum session duration set for the - // role. The maximum session duration setting can have a value from 1 hour to 12 - // hours. If you specify a value higher than this setting or the administrator - // setting (whichever is lower), the operation fails. For example, if you specify a - // session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. Role chaining limits your Amazon Web - // Services CLI or Amazon Web Services API role session to a maximum of one hour. - // When you use the AssumeRole API operation to assume a role, you can specify the - // duration of your role session with the DurationSeconds parameter. You can - // specify a parameter value of up to 43200 seconds (12 hours), depending on the - // maximum session duration setting for your role. However, if you assume a role - // using role chaining and provide a DurationSeconds parameter value greater than - // one hour, the operation fails.
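
For context on the operation being removed above, a minimal sketch of calling AssumeRole with MFA through aws-sdk-go-v2; the role ARN, MFA serial number, and token code are placeholder values, not anything taken from this change:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// Load region and credentials from the usual environment/config chain.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// SerialNumber identifies the MFA device; TokenCode is its current TOTP.
	out, err := client.AssumeRole(ctx, &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
		RoleSessionName: aws.String("example-session"),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),
		TokenCode:       aws.String("123456"),
		DurationSeconds: aws.Int32(3600),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary access key:", aws.ToString(out.Credentials.AccessKeyId))
}
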
To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int32 - - // A unique identifier that might be required when you assume a role in another - // account. If the administrator of the account to which the role belongs provided - // you with an external ID, then provide that value in the ExternalId parameter. - // This value can be any string, such as a passphrase or account number. A - // cross-account role is usually set up to trust everyone in an account. Therefore, - // the administrator of the trusting account might send an external ID to the - // administrator of the trusted account. That way, only someone with the ID can - // assume the role, rather than everyone in the account. For more information about - // the external ID, see How to Use an External ID When Granting Access to Your - // Amazon Web Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@:/- - ExternalId *string - - // An IAM policy in JSON format that you want to use as an inline session policy. - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use the - // role's temporary credentials in subsequent Amazon Web Services API calls to - // access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. 
The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []types.PolicyDescriptorType - - // A list of previously acquired trusted context assertions in the format of a - // JSON array. The trusted context assertion is signed and encrypted by Amazon Web - // Services STS. The following is an example of a ProvidedContext value that - // includes a single trusted context assertion and the ARN of the context provider - // from which the trusted context assertion was generated. - // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] - ProvidedContexts []types.ProvidedContext - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy of - // the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as - // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as - // arn:aws:iam::123456789012:mfa/user ). The regex used to validate this parameter - // is a string of characters consisting of upper- and lower-case alphanumeric - // characters with no spaces. You can also include underscores or any of the - // following characters: =,.@- - SerialNumber *string - - // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. You do this by using the sts:SourceIdentity condition key in a role trust - // policy. 
You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@-. You cannot use a value that begins with the text aws: . This prefix is - // reserved for Amazon Web Services internal use. - SourceIdentity *string - - // A list of session tags that you want to pass. Each session tag consists of a - // key name and an associated value. For more information about session tags, see - // Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. This parameter is optional. You can pass up to 50 session - // tags. The plaintext session tag keys can’t exceed 128 characters, and the values - // can’t exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. An Amazon Web Services conversion compresses the passed - // inline session policy, managed policy ARNs, and session tags into a packed - // binary format that has a separate limit. Your request can fail for this limit - // even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. You can pass a session tag with the - // same key as a tag that is already attached to the role. When you do, session - // tags override a role tag with the same key. Tag key–value pairs are not case - // sensitive, but case is preserved. This means that you cannot have separate - // Department and department tag keys. Assume that the role has the Department = - // Marketing tag and you pass the department = engineering session tag. Department - // and department are not saved as separate tags, and the session tag passed in - // the request takes precedence over the role tag. Additionally, if you used - // temporary credentials to perform this operation, the new session inherits any - // transitive session tags from the calling session. If you pass a session tag with - // the same key as an inherited tag, the operation fails. To view the inherited - // tags for a session, see the CloudTrail logs. For more information, see Viewing - // Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) - // in the IAM User Guide. - Tags []types.Tag - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA. (In other words, if the policy includes a condition that - // tests for MFA). If the role being assumed requires MFA and if the TokenCode - // value is missing or expired, the AssumeRole call returns an "access denied" - // error. 
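
A short sketch of how the Tags field described above combines with the TransitiveTagKeys field documented just below; the package name, role details, and tag values are illustrative assumptions only:

package stsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

// assumeRoleWithSessionTags passes one session tag and marks its key
// transitive so it persists through role chaining.
func assumeRoleWithSessionTags(ctx context.Context, client *sts.Client) (*sts.AssumeRoleOutput, error) {
	return client.AssumeRole(ctx, &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
		RoleSessionName: aws.String("tagged-session"),
		Tags: []types.Tag{
			{Key: aws.String("Department"), Value: aws.String("Engineering")},
		},
		TransitiveTagKeys: []string{"Department"},
	})
}
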
The format for this parameter, as described by its regex pattern, is a - // sequence of six numeric digits. - TokenCode *string - - // A list of keys for session tags that you want to set as transitive. If you set - // a tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. This parameter is optional. When you set session tags as - // transitive, the session policy and session tags packed binary limit is not - // affected. If you choose not to specify a transitive tag key, then no tags are - // passed from this session to any subsequent sessions. - TransitiveTagKeys []string - - noSmithyDocumentSerde -} - -// Contains the response to a successful AssumeRole request, including temporary -// Amazon Web Services credentials that can be used to make Amazon Web Services -// requests. -type AssumeRoleOutput struct { - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. For - // example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the - // RoleSessionName that you specified when you called AssumeRole . - AssumedRoleUser *types.AssumedRoleUser - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. - Credentials *types.Credentials - - // A percentage value that indicates the packed size of the session policies and - // session tags combined passed in the request. The request fails if the packed - // size is greater than 100 percent, which means the policies and tags exceeded the - // allowed space. - PackedPolicySize *int32 - - // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- - SourceIdentity *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRole{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRole"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AssumeRole", - } -} - -// PresignAssumeRole is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. 
-func (c *PresignClient) PresignAssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &AssumeRoleInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "AssumeRole", params, clientOptFns, - c.client.addOperationAssumeRoleMiddlewares, - presignConverter(options).convertToPresignMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go deleted file mode 100644 index 2a57b72ac9..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ /dev/null @@ -1,361 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary security credentials for users who have been -// authenticated via a SAML authentication response. This operation provides a -// mechanism for tying an enterprise identity store or directory to role-based -// Amazon Web Services access without user-specific credentials or configuration. -// For a comparison of AssumeRoleWithSAML with the other API operations that -// produce temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. The temporary security credentials returned by this -// operation consist of an access key ID, a secret access key, and a security -// token. Applications can use these temporary security credentials to sign calls -// to Amazon Web Services services. Session Duration By default, the temporary -// security credentials created by AssumeRoleWithSAML last for one hour. However, -// you can use the optional DurationSeconds parameter to specify the duration of -// your session. Your role session lasts for the duration that you specify, or -// until the time specified in the SAML authentication response's -// SessionNotOnOrAfter value, whichever is shorter. You can provide a -// DurationSeconds value from 900 seconds (15 minutes) up to the maximum session -// duration setting for the role. This setting can have a value from 1 hour to 12 -// hours. To learn how to view the maximum value for your role, see View the -// Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you use -// the AssumeRole* API operations or the assume-role* CLI commands. However the -// limit does not apply when you use those operations to create a console URL. 
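
To tie together the NewPresignClient and PresignAssumeRole plumbing deleted above, a hypothetical usage sketch; all ARNs and session names are placeholders:

package stsexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func presignAssumeRole(ctx context.Context) error {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return err
	}
	// NewPresignClient wraps an ordinary client; PresignAssumeRole then yields
	// a signed URL and headers instead of executing the request.
	presigner := sts.NewPresignClient(sts.NewFromConfig(cfg))
	req, err := presigner.PresignAssumeRole(ctx, &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
		RoleSessionName: aws.String("presigned-session"),
	})
	if err != nil {
		return err
	}
	fmt.Println(req.Method, req.URL)
	return nil
}
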
For -// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) -// limits your CLI or Amazon Web Services API role session to a maximum of one -// hour. When you use the AssumeRole API operation to assume a role, you can -// specify the duration of your role session with the DurationSeconds parameter. -// You can specify a parameter value of up to 43200 seconds (12 hours), depending -// on the maximum session duration setting for your role. However, if you assume a -// role using role chaining and provide a DurationSeconds parameter value greater -// than one hour, the operation fails. Permissions The temporary security -// credentials created by AssumeRoleWithSAML can be used to make API calls to any -// Amazon Web Services service with the following exception: you cannot call the -// STS GetFederationToken or GetSessionToken API operations. (Optional) You can -// pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of -// Amazon Web Services security credentials. The identity of the caller is -// validated by using keys in the metadata document that is uploaded for the SAML -// provider entity for your identity provider. Calling AssumeRoleWithSAML can -// result in an entry in your CloudTrail logs. The entry includes the value in the -// NameID element of the SAML assertion. We recommend that you use a NameIDType -// that is not associated with any personally identifiable information (PII). For -// example, you could instead use the persistent identifier ( -// urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). Tags (Optional) You can -// configure your IdP to pass attributes into your SAML assertion as session tags. -// Each session tag consists of a key name and an associated value. For more -// information about session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session -// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. 
-// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. An Amazon Web Services conversion compresses the passed -// inline session policy, managed policy ARNs, and session tags into a packed -// binary format that has a separate limit. Your request can fail for this limit -// even if your plaintext meets the other requirements. The PackedPolicySize -// response element indicates by percentage how close the policies and tags for -// your request are to the upper size limit. You can pass a session tag with the -// same key as a tag that is attached to the role. When you do, session tags -// override the role's tags with the same key. An administrator must grant you the -// permissions necessary to pass session tags. The administrator can also create -// granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. SAML Configuration Before your application can call -// AssumeRoleWithSAML , you must configure your SAML identity provider (IdP) to -// issue the claims required by Amazon Web Services. Additionally, you must use -// Identity and Access Management (IAM) to create a SAML provider entity in your -// Amazon Web Services account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. -// For more information, see the following resources: -// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. -// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. -func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { - if params == nil { - params = &AssumeRoleWithSAMLInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithSAML", params, optFns, c.addOperationAssumeRoleWithSAMLMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AssumeRoleWithSAMLOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AssumeRoleWithSAMLInput struct { - - // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the - // IdP. - // - // This member is required. - PrincipalArn *string - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // This member is required. - RoleArn *string - - // The base64 encoded SAML authentication response provided by the IdP. 
For more - // information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the IAM User Guide. - // - // This member is required. - SAMLAssertion *string - - // The duration, in seconds, of the role session. Your role session lasts for the - // duration that you specify for the DurationSeconds parameter, or until the time - // specified in the SAML authentication response's SessionNotOnOrAfter value, - // whichever is shorter. You can provide a DurationSeconds value from 900 seconds - // (15 minutes) up to the maximum session duration setting for the role. This - // setting can have a value from 1 hour to 12 hours. If you specify a value higher - // than this setting, the operation fails. For example, if you specify a session - // duration of 12 hours, but your administrator set the maximum session duration to - // 6 hours, your operation fails. To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int32 - - // An IAM policy in JSON format that you want to use as an inline session policy. - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use the - // role's temporary credentials in subsequent Amazon Web Services API calls to - // access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. 
- Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []types.PolicyDescriptorType - - noSmithyDocumentSerde -} - -// Contains the response to a successful AssumeRoleWithSAML request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. -type AssumeRoleWithSAMLOutput struct { - - // The identifiers for the temporary security credentials that the operation - // returns. - AssumedRoleUser *types.AssumedRoleUser - - // The value of the Recipient attribute of the SubjectConfirmationData element of - // the SAML assertion. - Audience *string - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. - Credentials *types.Credentials - - // The value of the Issuer element of the SAML assertion. - Issuer *string - - // A hash value based on the concatenation of the following: - // - The Issuer response value. - // - The Amazon Web Services account ID. - // - The friendly name (the last part of the ARN) of the SAML provider in IAM. - // The combination of NameQualifier and Subject can be used to uniquely identify a - // user. The following pseudocode shows how the hash value is calculated: BASE64 ( - // SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) - NameQualifier *string - - // A percentage value that indicates the packed size of the session policies and - // session tags combined passed in the request. The request fails if the packed - // size is greater than 100 percent, which means the policies and tags exceeded the - // allowed space. 
- PackedPolicySize *int32 - - // The value in the SourceIdentity attribute in the SAML assertion. You can - // require users to set a source identity value when they assume a role. You do - // this by using the sts:SourceIdentity condition key in a role trust policy. That - // way, actions that are taken with the role are associated with that user. After - // the source identity is set, the value cannot be changed. It is present in the - // request for all actions that are taken by the role and persists across chained - // role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. You can configure your SAML identity provider to use an attribute - // associated with your users, like user name or email, as the source identity when - // calling AssumeRoleWithSAML . You do this by adding an attribute to the SAML - // assertion. For more information about using source identity, see Monitor and - // control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- - SourceIdentity *string - - // The value of the NameID element in the Subject element of the SAML assertion. - Subject *string - - // The format of the name ID, as defined by the Format attribute in the NameID - // element of the SAML assertion. Typical examples of the format are transient or - // persistent . If the format includes the prefix - // urn:oasis:names:tc:SAML:2.0:nameid-format , that prefix is removed. For example, - // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . - // If the format includes any other prefix, the format is returned with no - // modifications. - SubjectType *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithSAML{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithSAML"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AssumeRoleWithSAML", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go deleted file mode 100644 index 98108ce6af..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ /dev/null @@ -1,380 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary security credentials for users who have been -// authenticated in a mobile or web application with a web identity provider. 
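
Before the AssumeRoleWithWebIdentity documentation continues, a minimal sketch of the AssumeRoleWithSAML call whose middleware wiring is removed above, assuming a base64-encoded assertion is already in hand; the provider and role ARNs are placeholders:

package stsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// assumeRoleWithSAML exchanges a base64-encoded SAML assertion for temporary
// credentials; the call is validated against the IdP's uploaded metadata, not
// against AWS credentials configured on the client.
func assumeRoleWithSAML(ctx context.Context, client *sts.Client, assertion string) (*sts.AssumeRoleWithSAMLOutput, error) {
	return client.AssumeRoleWithSAML(ctx, &sts.AssumeRoleWithSAMLInput{
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/ExampleIdP"),
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/example"),
		SAMLAssertion: aws.String(assertion),
	})
}
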
-// Example providers include the OAuth 2.0 providers Login with Amazon and -// Facebook, or any OpenID Connect-compatible identity provider such as Google or -// Amazon Cognito federated identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) -// . For mobile applications, we recommend that you use Amazon Cognito. You can use -// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and the Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// to uniquely identify a user. You can also supply the user with a consistent -// identity throughout the lifetime of an application. To learn more about Amazon -// Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) -// in Amazon Cognito Developer Guide. Calling AssumeRoleWithWebIdentity does not -// require the use of Amazon Web Services security credentials. Therefore, you can -// distribute an application (for example, on mobile devices) that requests -// temporary security credentials without including long-term Amazon Web Services -// credentials in the application. You also don't need to deploy server-based proxy -// services that use long-term Amazon Web Services credentials. Instead, the -// identity of the caller is validated by using a token from the web identity -// provider. For a comparison of AssumeRoleWithWebIdentity with the other API -// operations that produce temporary credentials, see Requesting Temporary -// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. The temporary security credentials returned by this API -// consist of an access key ID, a secret access key, and a security token. -// Applications can use these temporary security credentials to sign calls to -// Amazon Web Services service API operations. Session Duration By default, the -// temporary security credentials created by AssumeRoleWithWebIdentity last for -// one hour. However, you can use the optional DurationSeconds parameter to -// specify the duration of your session. You can provide a value from 900 seconds -// (15 minutes) up to the maximum session duration setting for the role. This -// setting can have a value from 1 hour to 12 hours. To learn how to view the -// maximum value for your role, see View the Maximum Session Duration Setting for -// a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you use -// the AssumeRole* API operations or the assume-role* CLI commands. However the -// limit does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. Permissions The temporary security credentials created by -// AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web -// Services service with the following exception: you cannot call the STS -// GetFederationToken or GetSessionToken API operations. 
(Optional) You can pass -// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass -// attributes into your web identity token as session tags. Each session tag -// consists of a key name and an associated value. For more information about -// session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session -// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. -// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. An Amazon Web Services conversion compresses the passed -// inline session policy, managed policy ARNs, and session tags into a packed -// binary format that has a separate limit. Your request can fail for this limit -// even if your plaintext meets the other requirements. The PackedPolicySize -// response element indicates by percentage how close the policies and tags for -// your request are to the upper size limit. You can pass a session tag with the -// same key as a tag that is attached to the role. When you do, the session tag -// overrides the role tag with the same key. An administrator must grant you the -// permissions necessary to pass session tags. The administrator can also create -// granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. Identities Before your application can call -// AssumeRoleWithWebIdentity , you must have an identity token from a supported -// identity provider and create a role that the application can assume. The role -// that your application assumes must trust the identity provider that is -// associated with the identity token. In other words, the identity provider must -// be specified in the role's trust policy. 
Calling AssumeRoleWithWebIdentity can -// result in an entry in your CloudTrail logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided web identity token. We recommend that you avoid using any -// personally identifiable information (PII) in this field. For example, you could -// instead use a GUID or a pairwise identifier, as suggested in the OIDC -// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes) -// . For more information about how to use web identity federation and the -// AssumeRoleWithWebIdentity API, see the following resources: -// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// . -// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/) -// . Walk through the process of authenticating through Login with Amazon, -// Facebook, or Google, getting temporary security credentials, and then using -// those credentials to make a request to Amazon Web Services. -// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// . These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. -// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications) -// . This article discusses web identity federation and shows an example of how to -// use web identity federation to get access to content in Amazon S3. -func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { - if params == nil { - params = &AssumeRoleWithWebIdentityInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithWebIdentity", params, optFns, c.addOperationAssumeRoleWithWebIdentityMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AssumeRoleWithWebIdentityOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AssumeRoleWithWebIdentityInput struct { - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // This member is required. - RoleArn *string - - // An identifier for the assumed role session. Typically, you pass the name or - // identifier that is associated with the user who is using your application. That - // way, the temporary security credentials that your application will use are - // associated with that user. This session name is included as part of the ARN and - // assumed role ID in the AssumedRoleUser response element. The regex used to - // validate this parameter is a string of characters consisting of upper- and - // lower-case alphanumeric characters with no spaces. You can also include - // underscores or any of the following characters: =,.@- - // - // This member is required. - RoleSessionName *string - - // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the - // identity provider. 
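
And the matching sketch for AssumeRoleWithWebIdentity, assuming an OIDC ID token obtained out of band; the ARN and session name are again placeholders:

package stsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// assumeRoleWithWebIdentity trades an OIDC ID token (or OAuth 2.0 access
// token) for temporary credentials; like the SAML variant, it requires no AWS
// credentials on the client.
func assumeRoleWithWebIdentity(ctx context.Context, client *sts.Client, token string) (*sts.AssumeRoleWithWebIdentityOutput, error) {
	return client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"),
		RoleSessionName:  aws.String("web-identity-session"),
		WebIdentityToken: aws.String(token),
	})
}
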
Your application must get this token by authenticating the - // user who is using your application with a web identity provider before the - // application makes an AssumeRoleWithWebIdentity call. Only tokens with RSA - // algorithms (RS256) are supported. - // - // This member is required. - WebIdentityToken *string - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify a - // session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int32 - - // An IAM policy in JSON format that you want to use as an inline session policy. - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use the - // role's temporary credentials in subsequent Amazon Web Services API calls to - // access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. 
However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []types.PolicyDescriptorType - - // The fully qualified host component of the domain name of the OAuth 2.0 identity - // provider. Do not specify this value for an OpenID Connect identity provider. - // Currently www.amazon.com and graph.facebook.com are the only supported identity - // providers for OAuth 2.0 access tokens. Do not include URL schemes and port - // numbers. Do not specify this value for OpenID Connect ID tokens. - ProviderId *string - - noSmithyDocumentSerde -} - -// Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary Amazon Web Services credentials that can be used to make -// Amazon Web Services requests. -type AssumeRoleWithWebIdentityOutput struct { - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. For - // example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the - // RoleSessionName that you specified when you called AssumeRole . - AssumedRoleUser *types.AssumedRoleUser - - // The intended audience (also known as client ID) of the web identity token. This - // is traditionally the client identifier issued to the application that requested - // the web identity token. - Audience *string - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. The size of the security token that STS API - // operations return is not fixed. We strongly recommend that you make no - // assumptions about the maximum size. - Credentials *types.Credentials - - // A percentage value that indicates the packed size of the session policies and - // session tags combined passed in the request. The request fails if the packed - // size is greater than 100 percent, which means the policies and tags exceeded the - // allowed space. - PackedPolicySize *int32 - - // The issuing authority of the web identity token presented. 
For OpenID Connect - // ID tokens, this contains the value of the iss field. For OAuth 2.0 access - // tokens, this contains the value of the ProviderId parameter that was passed in - // the AssumeRoleWithWebIdentity request. - Provider *string - - // The value of the source identity that is returned in the JSON web token (JWT) - // from the identity provider. You can require users to set a source identity value - // when they assume a role. You do this by using the sts:SourceIdentity condition - // key in a role trust policy. That way, actions that are taken with the role are - // associated with that user. After the source identity is set, the value cannot be - // changed. It is present in the request for all actions that are taken by the role - // and persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. You can configure your identity provider to use an attribute - // associated with your users, like user name or email, as the source identity when - // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON - // web token. To learn more about OIDC tokens and claims, see Using Tokens with - // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) - // in the Amazon Cognito Developer Guide. For more information about using source - // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- - SourceIdentity *string - - // The unique user identifier that is returned by the identity provider. This - // identifier is associated with the WebIdentityToken that was submitted with the - // AssumeRoleWithWebIdentity call. The identifier is typically unique to the user - // and the application that acquired the WebIdentityToken (pairwise identifier). - // For OpenID Connect ID tokens, this field contains the value returned by the - // identity provider as the token's sub (Subject) claim. - SubjectFromWebIdentityToken *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithWebIdentity{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithWebIdentity"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AssumeRoleWithWebIdentity", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go deleted file mode 100644 index b4ad54ab2f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go +++ /dev/null @@ -1,160 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Decodes additional information about the authorization status of a request from -// an encoded message returned in response to an Amazon Web Services request. 
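-// (An illustrative sketch, editor's addition rather than generated text:
-// given an encoded message captured from a 403 response, and cfg holding a
-// previously loaded aws.Config, a decode call might look like
-//
-//	client := sts.NewFromConfig(cfg)
-//	out, err := client.DecodeAuthorizationMessage(ctx, &sts.DecodeAuthorizationMessageInput{
-//		EncodedMessage: aws.String(encodedMsg), // hypothetical variable
-//	})
-//
-// with the human-readable detail returned in out.DecodedMessage.)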
For -// example, if a user is not authorized to perform an operation that he or she has -// requested, the request returns a Client.UnauthorizedOperation response (an HTTP -// 403 response). Some Amazon Web Services operations additionally return an -// encoded message that can provide details about this authorization failure. Only -// certain Amazon Web Services operations return an encoded authorization message. -// The documentation for an individual operation indicates whether that operation -// returns an encoded message in addition to returning an HTTP code. The message is -// encoded because the details of the authorization status can contain privileged -// information that the user who requested the operation should not see. To decode -// an authorization status message, a user must be granted permissions through an -// IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// to request the DecodeAuthorizationMessage ( sts:DecodeAuthorizationMessage ) -// action. The decoded message includes the following type of information: -// - Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see Determining Whether a -// Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. -// - The principal who made the request. -// - The requested action. -// - The requested resource. -// - The values of condition keys in the context of the user's request. -func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { - if params == nil { - params = &DecodeAuthorizationMessageInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DecodeAuthorizationMessage", params, optFns, c.addOperationDecodeAuthorizationMessageMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DecodeAuthorizationMessageOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DecodeAuthorizationMessageInput struct { - - // The encoded message that was returned with the response. - // - // This member is required. - EncodedMessage *string - - noSmithyDocumentSerde -} - -// A document that contains additional information about the authorization status -// of a request from an encoded message that is returned in response to an Amazon -// Web Services request. -type DecodeAuthorizationMessageOutput struct { - - // The API returns a response with the decoded message. - DecodedMessage *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDecodeAuthorizationMessage{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DecodeAuthorizationMessage"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DecodeAuthorizationMessage", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go deleted file mode 100644 index 1f7cbcc2bb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the account identifier for the specified access key ID. 
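-// (Editor's sketch, not generated text: assuming client is an *sts.Client,
-// a minimal lookup might be
-//
-//	out, err := client.GetAccessKeyInfo(ctx, &sts.GetAccessKeyInfoInput{
-//		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // example key ID from this doc
-//	})
-//
-// where out.Account holds the ID of the owning account.)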
Access keys -// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE ) and -// a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). -// For more information about access keys, see Managing Access Keys for IAM Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) -// in the IAM User Guide. When you pass an access key ID to this operation, it -// returns the ID of the Amazon Web Services account to which the keys belong. -// Access key IDs beginning with AKIA are long-term credentials for an IAM user or -// the Amazon Web Services account root user. Access key IDs beginning with ASIA -// are temporary credentials that are created using STS operations. If the account -// in the response belongs to you, you can sign in as the root user and review your -// root user access keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) -// to learn which IAM user owns the keys. To learn who requested the temporary -// credentials for an ASIA access key, view the STS events in your CloudTrail logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) -// in the IAM User Guide. This operation does not indicate the state of the access -// key. The key might be active, inactive, or deleted. Active keys might not have -// permissions to perform an operation. Providing a deleted access key might return -// an error that the key doesn't exist. -func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { - if params == nil { - params = &GetAccessKeyInfoInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetAccessKeyInfo", params, optFns, c.addOperationGetAccessKeyInfoMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetAccessKeyInfoOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetAccessKeyInfoInput struct { - - // The identifier of an access key. This parameter allows (through its regex - // pattern) a string of characters that can consist of any upper- or lowercase - // letter or digit. - // - // This member is required. - AccessKeyId *string - - noSmithyDocumentSerde -} - -type GetAccessKeyInfoOutput struct { - - // The number used to identify the Amazon Web Services account. - Account *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetAccessKeyInfo{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetAccessKeyInfo"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetAccessKeyInfo", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go deleted file mode 100644 index acb7ede44f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go +++ /dev/null @@ -1,170 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns details about the IAM user or role whose credentials are used to call -// the operation. No permissions are required to perform this operation. 
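-// (Illustrative sketch, editor's addition: a common "who am I" check is
-//
-//	out, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
-//	if err == nil {
-//		fmt.Println(aws.ToString(out.Account), aws.ToString(out.Arn))
-//	}
-//
-// assuming client is an *sts.Client and the usual aws and fmt imports.)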
If an -// administrator attaches a policy to your identity that explicitly denies access -// to the sts:GetCallerIdentity action, you can still perform this operation. -// Permissions are not required because the same information is returned when -// access is denied. To view an example response, see I Am Not Authorized to -// Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) -// in the IAM User Guide. -func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { - if params == nil { - params = &GetCallerIdentityInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetCallerIdentity", params, optFns, c.addOperationGetCallerIdentityMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetCallerIdentityOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetCallerIdentityInput struct { - noSmithyDocumentSerde -} - -// Contains the response to a successful GetCallerIdentity request, including -// information about the entity making the request. -type GetCallerIdentityOutput struct { - - // The Amazon Web Services account ID number of the account that owns or contains - // the calling entity. - Account *string - - // The Amazon Web Services ARN associated with the calling entity. - Arn *string - - // The unique identifier of the calling entity. The exact value depends on the - // type of entity that is making the call. The values returned are those listed in - // the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) - // found on the Policy Variables reference page in the IAM User Guide. - UserId *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetCallerIdentity{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetCallerIdentity"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetCallerIdentity", - } -} - -// PresignGetCallerIdentity is used to generate a presigned HTTP Request which -// contains presigned URL, signed headers and HTTP method used. 
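-//
-// (Illustrative sketch, editor's addition: assuming client is an *sts.Client,
-//
-//	presigner := sts.NewPresignClient(client)
-//	req, err := presigner.PresignGetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
-//	// req.URL, req.Method, and req.SignedHeader describe the presigned request.
-//
-// This pattern is often used to prove a caller's AWS identity to a third
-// party without sharing credentials.)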
-func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &GetCallerIdentityInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "GetCallerIdentity", params, clientOptFns, - c.client.addOperationGetCallerIdentityMiddlewares, - presignConverter(options).convertToPresignMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go deleted file mode 100644 index 3679618cb5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ /dev/null @@ -1,320 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary security credentials (consisting of an access key -// ID, a secret access key, and a security token) for a user. A typical use is in a -// proxy application that gets temporary security credentials on behalf of -// distributed applications inside a corporate network. You must call the -// GetFederationToken operation using the long-term security credentials of an IAM -// user. As a result, this call is appropriate in contexts where those credentials -// can be safeguarded, usually in a server-based application. For a comparison of -// GetFederationToken with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. Although it is possible to call GetFederationToken using -// the security credentials of an Amazon Web Services account root user rather than -// an IAM user that you create for the purpose of a proxy application, we do not -// recommend it. For more information, see Safeguard your root user credentials -// and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) -// in the IAM User Guide. You can create a mobile-based or browser-based app that -// can authenticate users using a web identity provider like Login with Amazon, -// Facebook, Google, or an OpenID Connect-compatible identity provider. In this -// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity . For more information, see Federation Through a -// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. 
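-// (Illustrative sketch, editor's addition rather than generated text: a
-// broker application holding IAM user credentials might mint scoped
-// credentials roughly as
-//
-//	out, err := client.GetFederationToken(ctx, &sts.GetFederationTokenInput{
-//		Name:            aws.String("Bob"),       // federated user name, as in this doc
-//		Policy:          aws.String(policyJSON),  // hypothetical inline session policy
-//		DurationSeconds: aws.Int32(3600),
-//	})
-//
-// with the temporary credentials returned in out.Credentials.)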
Session duration The temporary credentials are valid for -// the specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 -// seconds (36 hours). The default session duration is 43,200 seconds (12 hours). -// Temporary credentials obtained by using the root user credentials have a maximum -// duration of 3,600 seconds (1 hour). Permissions You can use the temporary -// credentials created by GetFederationToken in any Amazon Web Services service -// with the following exceptions: -// - You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. This limitation does not apply to console sessions. -// - You cannot call any STS operations except GetCallerIdentity . -// -// You can use temporary credentials for single sign-on (SSO) to the console. You -// must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. When you pass -// session policies, the session permissions are the intersection of the IAM user -// policies and the session policies that you pass. This gives you a way to further -// restrict the permissions for a federated user. You cannot use session policies -// to grant more permissions than those that are defined in the permissions policy -// of the IAM user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. For information about using GetFederationToken to create -// temporary security credentials, see GetFederationToken—Federation Through a -// Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken) -// . You can use the credentials to access a resource that has a resource-based -// policy. If that policy specifically references the federated user session in the -// Principal element of the policy, the session has the permissions allowed by the -// policy. These permissions are granted in addition to the permissions granted by -// the session policies. Tags (Optional) You can pass tag key-value pairs to your -// session. These are called session tags. For more information about session tags, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can create a mobile-based or browser-based app that -// can authenticate users using a web identity provider like Login with Amazon, -// Facebook, Google, or an OpenID Connect-compatible identity provider. In this -// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity . For more information, see Federation Through a -// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. An administrator must grant you the permissions necessary -// to pass session tags. 
The administrator can also create granular permissions to -// allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is -// preserved. This means that you cannot have separate Department and department -// tag keys. Assume that the user that you are federating has the Department = -// Marketing tag and you pass the department = engineering session tag. Department -// and department are not saved as separate tags, and the session tag passed in -// the request takes precedence over the user tag. -func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { - if params == nil { - params = &GetFederationTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetFederationToken", params, optFns, c.addOperationGetFederationTokenMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetFederationTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetFederationTokenInput struct { - - // The name of the federated user. The name is used as an identifier for the - // temporary security credentials (such as Bob ). For example, you can reference - // the federated user name in a resource-based policy, such as in an Amazon S3 - // bucket policy. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- - // - // This member is required. - Name *string - - // The duration, in seconds, that the session should last. Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds - // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using root user credentials are restricted to a maximum of 3,600 seconds (one - // hour). If the specified duration is longer than one hour, the session obtained - // by using root user credentials defaults to one hour. - DurationSeconds *int32 - - // An IAM policy in JSON format that you want to use as an inline session policy. - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policy Amazon - // Resource Names (ARNs) to use as managed session policies. This parameter is - // optional. However, if you do not pass any session policies, then the resulting - // federated user session has no permissions. When you pass session policies, the - // session permissions are the intersection of the IAM user policies and the - // session policies that you pass. This gives you a way to further restrict the - // permissions for a federated user. You cannot use session policies to grant more - // permissions than those that are defined in the permissions policy of the IAM - // user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. 
The resulting credentials can be used to access a - // resource that has a resource-based policy. If that policy specifically - // references the federated user session in the Principal element of the policy, - // the session has the permissions allowed by the policy. These permissions are - // granted in addition to the permissions that are granted by the session policies. - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. An Amazon Web Services conversion compresses the - // passed inline session policy, managed policy ARNs, and session tags into a - // packed binary format that has a separate limit. Your request can fail for this - // limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as a managed session policy. The policies must exist in the same account as - // the IAM user that is requesting federated access. You must pass an inline or - // managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policy Amazon - // Resource Names (ARNs) to use as managed session policies. The plaintext that you - // use for both inline and managed session policies can't exceed 2,048 characters. - // You can provide up to 10 managed policy ARNs. For more information about ARNs, - // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. This parameter is optional. - // However, if you do not pass any session policies, then the resulting federated - // user session has no permissions. When you pass session policies, the session - // permissions are the intersection of the IAM user policies and the session - // policies that you pass. This gives you a way to further restrict the permissions - // for a federated user. You cannot use session policies to grant more permissions - // than those that are defined in the permissions policy of the IAM user. For more - // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The resulting credentials can be used to access a - // resource that has a resource-based policy. If that policy specifically - // references the federated user session in the Principal element of the policy, - // the session has the permissions allowed by the policy. These permissions are - // granted in addition to the permissions that are granted by the session policies. - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. 
The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - PolicyArns []types.PolicyDescriptorType - - // A list of session tags. Each session tag consists of a key name and an - // associated value. For more information about session tags, see Passing Session - // Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. This parameter is optional. You can pass up to 50 session - // tags. The plaintext session tag keys can’t exceed 128 characters and the values - // can’t exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. An Amazon Web Services conversion compresses the passed - // inline session policy, managed policy ARNs, and session tags into a packed - // binary format that has a separate limit. Your request can fail for this limit - // even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. You can pass a session tag with the - // same key as a tag that is already attached to the user you are federating. When - // you do, session tags override a user tag with the same key. Tag key–value pairs - // are not case sensitive, but case is preserved. This means that you cannot have - // separate Department and department tag keys. Assume that the role has the - // Department = Marketing tag and you pass the department = engineering session - // tag. Department and department are not saved as separate tags, and the session - // tag passed in the request takes precedence over the role tag. - Tags []types.Tag - - noSmithyDocumentSerde -} - -// Contains the response to a successful GetFederationToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. -type GetFederationTokenOutput struct { - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. - Credentials *types.Credentials - - // Identifiers for the federated user associated with the credentials (such as - // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob ). You can use - // the federated user's ARN in your resource-based policies, such as an Amazon S3 - // bucket policy. - FederatedUser *types.FederatedUser - - // A percentage value that indicates the packed size of the session policies and - // session tags combined passed in the request. The request fails if the packed - // size is greater than 100 percent, which means the policies and tags exceeded the - // allowed space. - PackedPolicySize *int32 - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetFederationToken{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetFederationToken"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetFederationToken", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go deleted file mode 100644 index 751fb147d4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ /dev/null @@ -1,203 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary credentials for an Amazon Web Services account or -// IAM user. The credentials consist of an access key ID, a secret access key, and -// a security token. 
Typically, you use GetSessionToken if you want to use MFA to -// protect programmatic calls to specific Amazon Web Services API operations like -// Amazon EC2 StopInstances . MFA-enabled IAM users must call GetSessionToken and -// submit an MFA code that is associated with their MFA device. Using the temporary -// security credentials that the call returns, IAM users can then make programmatic -// calls to API operations that require MFA authentication. An incorrect MFA code -// causes the API to return an access denied error. For a comparison of -// GetSessionToken with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. No permissions are required for users to perform this -// operation. The purpose of the sts:GetSessionToken operation is to authenticate -// the user using MFA. You cannot use policies to control authentication -// operations. For more information, see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) -// in the IAM User Guide. Session Duration The GetSessionToken operation must be -// called by using the long-term Amazon Web Services security credentials of an IAM -// user. Credentials that are created by IAM users are valid for the duration that -// you specify. This duration can range from 900 seconds (15 minutes) up to a -// maximum of 129,600 seconds (36 hours), with a default of 43,200 seconds (12 -// hours). Credentials based on account credentials can range from 900 seconds (15 -// minutes) up to 3,600 seconds (1 hour), with a default of 1 hour. Permissions The -// temporary security credentials created by GetSessionToken can be used to make -// API calls to any Amazon Web Services service with the following exceptions: -// - You cannot call any IAM API operations unless MFA authentication -// information is included in the request. -// - You cannot call any STS API except AssumeRole or GetCallerIdentity . -// -// The credentials that GetSessionToken returns are based on permissions -// associated with the IAM user whose credentials were used to call the operation. -// The temporary credentials have the same permissions as the IAM user. Although it -// is possible to call GetSessionToken using the security credentials of an Amazon -// Web Services account root user rather than an IAM user, we do not recommend it. -// If GetSessionToken is called using root user credentials, the temporary -// credentials have root user permissions. For more information, see Safeguard -// your root user credentials and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) -// in the IAM User Guide For more information about using GetSessionToken to -// create temporary credentials, see Temporary Credentials for Users in Untrusted -// Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) -// in the IAM User Guide. 
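-//
-// (Illustrative sketch, editor's addition: an MFA-protected session request
-// might look like
-//
-//	out, err := client.GetSessionToken(ctx, &sts.GetSessionTokenInput{
-//		SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/user"), // example ARN from this doc
-//		TokenCode:    aws.String("123456"), // hypothetical six-digit MFA code
-//	})
-//
-// returning fresh temporary credentials in out.Credentials.)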
-func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { - if params == nil { - params = &GetSessionTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetSessionToken", params, optFns, c.addOperationGetSessionTokenMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetSessionTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetSessionTokenInput struct { - - // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 - // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions for - // Amazon Web Services account owners are restricted to a maximum of 3,600 seconds - // (one hour). If the duration is longer than one hour, the session for Amazon Web - // Services account owners defaults to one hour. - DurationSeconds *int32 - - // The identification number of the MFA device that is associated with the IAM - // user who is making the GetSessionToken call. Specify this value if the IAM user - // has a policy that requires MFA authentication. The value is either the serial - // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name - // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You - // can find the device for an IAM user by going to the Amazon Web Services - // Management Console and viewing the user's security credentials. The regex used - // to validate this parameter is a string of characters consisting of upper- and - // lower-case alphanumeric characters with no spaces. You can also include - // underscores or any of the following characters: =,.@:/- - SerialNumber *string - - // The value provided by the MFA device, if MFA is required. If any policy - // requires the IAM user to submit an MFA code, specify this value. If MFA - // authentication is required, the user must provide a code when requesting a set - // of temporary security credentials. A user who fails to provide the code receives - // an "access denied" response when requesting resources that require MFA - // authentication. The format for this parameter, as described by its regex - // pattern, is a sequence of six numeric digits. - TokenCode *string - - noSmithyDocumentSerde -} - -// Contains the response to a successful GetSessionToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. -type GetSessionTokenOutput struct { - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. - Credentials *types.Credentials - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetSessionToken{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetSessionToken"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetSessionToken", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go deleted file mode 100644 index 9db5bfd434..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go +++ /dev/null @@ -1,296 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The region in which the operation is being invoked. - Region string -} - -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthParamsRegion(params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. 
-type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ - "AssumeRoleWithSAML": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "AssumeRoleWithWebIdentity": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, -} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "sts") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - for _, option := range options { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { - return &resolvedAuthScheme{ - Scheme: scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware 
struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go deleted file mode 100644 index 5d634ce35c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go +++ /dev/null @@ -1,2507 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
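Review aside: the auth.go file deleted just above defines the exported auth-scheme resolution hook for the STS client (AuthSchemeResolver, AuthResolverParameters, the Options.AuthSchemeResolver field) plus the per-operation anonymous overrides for AssumeRoleWithSAML and AssumeRoleWithWebIdentity. A hedged sketch of how a caller could interpose on that hook; the loggingResolver name is illustrative, only the sts types come from the deleted file:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	smithyauth "github.com/aws/smithy-go/auth"
)

// loggingResolver wraps the client's configured resolver, logs each
// resolution, and appends the anonymous scheme as a last-resort option,
// mirroring the withAnonymous wrapper in the deleted auth.go.
type loggingResolver struct {
	wrapped sts.AuthSchemeResolver
}

func (r loggingResolver) ResolveAuthSchemes(ctx context.Context, params *sts.AuthResolverParameters) ([]*smithyauth.Option, error) {
	opts, err := r.wrapped.ResolveAuthSchemes(ctx, params)
	if err != nil {
		return nil, err
	}
	log.Printf("auth schemes for %s in %s: %d option(s)", params.Operation, params.Region, len(opts))
	return append(opts, &smithyauth.Option{SchemeID: smithyauth.SchemeIDAnonymous}), nil
}

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// Defaults (including the built-in resolver) are populated before the
	// option function runs, so wrapping here decorates the default resolver.
	client := sts.NewFromConfig(cfg, func(o *sts.Options) {
		o.AuthSchemeResolver = loggingResolver{wrapped: o.AuthSchemeResolver}
	})
	_ = client
}

Note that the deleted wrapWithAnonymousAuth only wraps the built-in resolver (it type-asserts for the unexported default), so a custom resolver like this must append the anonymous option itself if it still wants that fallback.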
- -package sts - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithy "github.com/aws/smithy-go" - smithyxml "github.com/aws/smithy-go/encoding/xml" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithytime "github.com/aws/smithy-go/time" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "strconv" - "strings" -) - -type awsAwsquery_deserializeOpAssumeRole struct { -} - -func (*awsAwsquery_deserializeOpAssumeRole) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorAssumeRole(response, &metadata) - } - output := &AssumeRoleOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("AssumeRoleResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentAssumeRoleOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorAssumeRole(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = 
errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("MalformedPolicyDocument", errorCode): - return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) - - case strings.EqualFold("PackedPolicyTooLarge", errorCode): - return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) - - case strings.EqualFold("RegionDisabledException", errorCode): - return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpAssumeRoleWithSAML struct { -} - -func (*awsAwsquery_deserializeOpAssumeRoleWithSAML) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response, &metadata) - } - output := &AssumeRoleWithSAMLOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("AssumeRoleWithSAMLResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - 
errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("IDPRejectedClaim", errorCode): - return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) - - case strings.EqualFold("InvalidIdentityToken", errorCode): - return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) - - case strings.EqualFold("MalformedPolicyDocument", errorCode): - return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) - - case strings.EqualFold("PackedPolicyTooLarge", errorCode): - return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) - - case strings.EqualFold("RegionDisabledException", errorCode): - return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpAssumeRoleWithWebIdentity struct { -} - -func (*awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response, &metadata) - } - output := &AssumeRoleWithWebIdentityOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("AssumeRoleWithWebIdentityResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = 
&smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("IDPCommunicationError", errorCode): - return awsAwsquery_deserializeErrorIDPCommunicationErrorException(response, errorBody) - - case strings.EqualFold("IDPRejectedClaim", errorCode): - return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) - - case strings.EqualFold("InvalidIdentityToken", errorCode): - return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) - - case strings.EqualFold("MalformedPolicyDocument", errorCode): - return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) - - case strings.EqualFold("PackedPolicyTooLarge", errorCode): - return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) - - case strings.EqualFold("RegionDisabledException", errorCode): - return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct { -} - -func (*awsAwsquery_deserializeOpDecodeAuthorizationMessage) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response, &metadata) - } - output := &DecodeAuthorizationMessageOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - 
io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("DecodeAuthorizationMessageResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("InvalidAuthorizationMessageException", errorCode): - return awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpGetAccessKeyInfo struct { -} - -func (*awsAwsquery_deserializeOpGetAccessKeyInfo) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response, &metadata) - } - output := &GetAccessKeyInfoOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response 
body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("GetAccessKeyInfoResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpGetCallerIdentity struct { -} - -func (*awsAwsquery_deserializeOpGetCallerIdentity) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorGetCallerIdentity(response, &metadata) - } - output := &GetCallerIdentityOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("GetCallerIdentityResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response 
body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorGetCallerIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpGetFederationToken struct { -} - -func (*awsAwsquery_deserializeOpGetFederationToken) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorGetFederationToken(response, &metadata) - } - output := &GetFederationTokenOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("GetFederationTokenResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = 
&smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorGetFederationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("MalformedPolicyDocument", errorCode): - return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) - - case strings.EqualFold("PackedPolicyTooLarge", errorCode): - return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) - - case strings.EqualFold("RegionDisabledException", errorCode): - return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpGetSessionToken struct { -} - -func (*awsAwsquery_deserializeOpGetSessionToken) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorGetSessionToken(response, &metadata) - } - output := &GetSessionTokenOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("GetSessionTokenResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = 
awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorGetSessionToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("RegionDisabledException", errorCode): - return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsAwsquery_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ExpiredTokenException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentExpiredTokenException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorIDPCommunicationErrorException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.IDPCommunicationErrorException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = 
decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentIDPCommunicationErrorException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorIDPRejectedClaimException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.IDPRejectedClaimException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentIDPRejectedClaimException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidAuthorizationMessageException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorInvalidIdentityTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidIdentityTokenException{} - var buff [1024]byte - ringBuffer := 
smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentInvalidIdentityTokenException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.MalformedPolicyDocumentException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.PackedPolicyTooLargeException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = 
awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorRegionDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.RegionDisabledException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentRegionDisabledException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeDocumentAssumedRoleUser(v **types.AssumedRoleUser, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AssumedRoleUser - if *v == nil { - sv = &types.AssumedRoleUser{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Arn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Arn = ptr.String(xtv) - } - - case strings.EqualFold("AssumedRoleId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.AssumedRoleId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentCredentials(v **types.Credentials, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Credentials - if *v == nil { - sv = &types.Credentials{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessKeyId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.AccessKeyId = ptr.String(xtv) - } - - case strings.EqualFold("Expiration", t.Name.Local): - val, err := 
decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.Expiration = ptr.Time(t) - } - - case strings.EqualFold("SecretAccessKey", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SecretAccessKey = ptr.String(xtv) - } - - case strings.EqualFold("SessionToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SessionToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ExpiredTokenException - if *v == nil { - sv = &types.ExpiredTokenException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentFederatedUser(v **types.FederatedUser, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.FederatedUser - if *v == nil { - sv = &types.FederatedUser{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Arn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Arn = ptr.String(xtv) - } - - case strings.EqualFold("FederatedUserId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.FederatedUserId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentIDPCommunicationErrorException(v **types.IDPCommunicationErrorException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.IDPCommunicationErrorException - if *v == nil { - sv = &types.IDPCommunicationErrorException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := 
decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentIDPRejectedClaimException(v **types.IDPRejectedClaimException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.IDPRejectedClaimException - if *v == nil { - sv = &types.IDPRejectedClaimException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(v **types.InvalidAuthorizationMessageException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InvalidAuthorizationMessageException - if *v == nil { - sv = &types.InvalidAuthorizationMessageException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentInvalidIdentityTokenException(v **types.InvalidIdentityTokenException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InvalidIdentityTokenException - if *v == nil { - sv = &types.InvalidIdentityTokenException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.MalformedPolicyDocumentException - if *v == nil { - sv = 
&types.MalformedPolicyDocumentException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(v **types.PackedPolicyTooLargeException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.PackedPolicyTooLargeException - if *v == nil { - sv = &types.PackedPolicyTooLargeException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentRegionDisabledException(v **types.RegionDisabledException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.RegionDisabledException - if *v == nil { - sv = &types.RegionDisabledException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentAssumeRoleOutput(v **AssumeRoleOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *AssumeRoleOutput - if *v == nil { - sv = &AssumeRoleOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AssumedRoleUser", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { - 
return err - } - - case strings.EqualFold("PackedPolicySize", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.PackedPolicySize = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("SourceIdentity", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SourceIdentity = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(v **AssumeRoleWithSAMLOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *AssumeRoleWithSAMLOutput - if *v == nil { - sv = &AssumeRoleWithSAMLOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AssumedRoleUser", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Audience", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Audience = ptr.String(xtv) - } - - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Issuer", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Issuer = ptr.String(xtv) - } - - case strings.EqualFold("NameQualifier", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NameQualifier = ptr.String(xtv) - } - - case strings.EqualFold("PackedPolicySize", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.PackedPolicySize = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("SourceIdentity", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SourceIdentity = ptr.String(xtv) - } - - case strings.EqualFold("Subject", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Subject = ptr.String(xtv) - } - - case strings.EqualFold("SubjectType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SubjectType = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func 
awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **AssumeRoleWithWebIdentityOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *AssumeRoleWithWebIdentityOutput - if *v == nil { - sv = &AssumeRoleWithWebIdentityOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AssumedRoleUser", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Audience", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Audience = ptr.String(xtv) - } - - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("PackedPolicySize", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.PackedPolicySize = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Provider", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Provider = ptr.String(xtv) - } - - case strings.EqualFold("SourceIdentity", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SourceIdentity = ptr.String(xtv) - } - - case strings.EqualFold("SubjectFromWebIdentityToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SubjectFromWebIdentityToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *DecodeAuthorizationMessageOutput - if *v == nil { - sv = &DecodeAuthorizationMessageOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DecodedMessage", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.DecodedMessage = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(v **GetAccessKeyInfoOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - 
return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetAccessKeyInfoOutput - if *v == nil { - sv = &GetAccessKeyInfoOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Account", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Account = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(v **GetCallerIdentityOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetCallerIdentityOutput - if *v == nil { - sv = &GetCallerIdentityOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Account", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Account = ptr.String(xtv) - } - - case strings.EqualFold("Arn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Arn = ptr.String(xtv) - } - - case strings.EqualFold("UserId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.UserId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(v **GetFederationTokenOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetFederationTokenOutput - if *v == nil { - sv = &GetFederationTokenOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("FederatedUser", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentFederatedUser(&sv.FederatedUser, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("PackedPolicySize", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.PackedPolicySize = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } 
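Editorial note: every deserializer deleted in this file shares one generated shape — wrap the decoder around each element, match child tag names case-insensitively, and skip anything unrecognized. Below is a minimal sketch of that pattern using only the standard library; the `credentials` struct and helper names are illustrative, not the SDK's `types.Credentials`.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
	"time"
)

// credentials mirrors a subset of the STS Credentials shape for
// illustration only.
type credentials struct {
	AccessKeyId string
	Expiration  time.Time
}

// decodeCredentials walks tokens under one element, matches child tags
// case-insensitively, and skips anything it does not recognize — the
// same tolerant loop the generated deserializers implement.
func decodeCredentials(d *xml.Decoder, start xml.StartElement) (*credentials, error) {
	sv := &credentials{}
	for {
		tok, err := d.Token()
		if err != nil {
			return nil, err
		}
		switch t := tok.(type) {
		case xml.StartElement:
			var val string
			switch {
			case strings.EqualFold(t.Name.Local, "AccessKeyId"):
				if err := d.DecodeElement(&val, &t); err != nil {
					return nil, err
				}
				sv.AccessKeyId = val
			case strings.EqualFold(t.Name.Local, "Expiration"):
				if err := d.DecodeElement(&val, &t); err != nil {
					return nil, err
				}
				// smithytime.ParseDateTime accepts RFC 3339 timestamps.
				ts, perr := time.Parse(time.RFC3339, val)
				if perr != nil {
					return nil, perr
				}
				sv.Expiration = ts
			default:
				// Do nothing and ignore the unexpected tag element.
				if err := d.Skip(); err != nil {
					return nil, err
				}
			}
		case xml.EndElement:
			if t.Name == start.Name {
				return sv, nil
			}
		}
	}
}

func main() {
	doc := `<Credentials><AccessKeyId>EXAMPLEKEY</AccessKeyId>` +
		`<Expiration>2024-05-14T00:00:00Z</Expiration>` +
		`<FutureField>ignored</FutureField></Credentials>`
	d := xml.NewDecoder(strings.NewReader(doc))
	tok, err := d.Token()
	if err != nil {
		panic(err)
	}
	creds, err := decodeCredentials(d, tok.(xml.StartElement))
	if err != nil {
		panic(err)
	}
	fmt.Println(creds.AccessKeyId, creds.Expiration)
}
```

Skipping unknown tags is what lets older vendored clients tolerate new STS response fields without regeneration.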
- - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(v **GetSessionTokenOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetSessionTokenOutput - if *v == nil { - sv = &GetSessionTokenOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go deleted file mode 100644 index d963fd8d19..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -// Package sts provides the API client, operations, and parameter types for AWS -// Security Token Service. -// -// Security Token Service Security Token Service (STS) enables you to request -// temporary, limited-privilege credentials for users. This guide provides -// descriptions of the STS API. For more information about using this service, see -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) -// . -package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go deleted file mode 100644 index 32e2d5435f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go +++ /dev/null @@ -1,1115 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. 
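Editorial note: for orientation, these generated deserializers only ever run behind public operations such as GetCallerIdentity. A hedged example of the call path they back, using the upstream `github.com` import paths rather than the mirrored paths shown in this diff, and assuming default credentials are available:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	// Load credentials and region from the environment or shared config.
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// GetCallerIdentity takes no input fields and works with any valid
	// credentials; Account, Arn, and UserId are exactly the tags matched
	// by the deleted GetCallerIdentityOutput deserializer.
	out, err := client.GetCallerIdentity(context.Background(), &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*out.Account, *out.Arn, *out.UserId)
}
```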
-type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom.You can provide functional options to configure endpoint -// values for the resolved endpoint. -func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "sts" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return 
stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. -// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_STS") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "STS", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. 
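Editorial note: the finalizeClientEndpointResolverOptions helper removed above normalizes FIPS pseudo-regions before resolution. A standalone sketch of that rewrite, using the same affixes and replacement order as the deleted code:

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeFIPSRegion rewrites names like "fips-us-east-1" or
// "us-west-2-fips" to the plain region and reports that FIPS
// endpoints should be switched on, mirroring the deleted helper.
func normalizeFIPSRegion(region string) (resolved string, useFIPS bool) {
	const infix, prefix, suffix = "-fips-", "fips-", "-fips"
	if strings.Contains(region, infix) ||
		strings.Contains(region, prefix) ||
		strings.Contains(region, suffix) {
		r := strings.ReplaceAll(region, infix, "-")
		r = strings.ReplaceAll(r, prefix, "")
		r = strings.ReplaceAll(r, suffix, "")
		return r, true
	}
	return region, false
}

func main() {
	for _, r := range []string{"fips-us-east-1", "us-west-2-fips", "eu-west-1"} {
		fmt.Println(normalizeFIPSRegion(r))
	}
}
```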
- // - // AWS::Region - Region *string - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. - // - // AWS::UseDualStack - UseDualStack *bool - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string - - // Whether the global endpoint should be used, rather then the regional endpoint - // for us-east-1. - // - // Defaults to false if no value is - // provided. - // - // AWS::STS::UseGlobalEndpoint - UseGlobalEndpoint *bool -} - -// ValidateRequired validates required parameters are set. -func (p EndpointParameters) ValidateRequired() error { - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - if p.UseGlobalEndpoint == nil { - return fmt.Errorf("parameter UseGlobalEndpoint is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. -func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - - if p.UseGlobalEndpoint == nil { - p.UseGlobalEndpoint = ptr.Bool(false) - } - return p -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. 
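Editorial note: EndpointResolverV2 is the seam callers can still implement after this cleanup. Below is a sketch of a resolver that pins requests to the global sts.amazonaws.com endpoint, loosely mirroring the legacy-region rules in the generated resolver that follows; the `staticResolver` type is hypothetical, while `sts.EndpointParameters`, `sts.Options.EndpointResolverV2`, and `smithyendpoints.Endpoint` are the real interfaces quoted in this diff.

```go
package main

import (
	"context"
	"fmt"
	"net/url"

	"github.com/aws/aws-sdk-go-v2/service/sts"
	smithyendpoints "github.com/aws/smithy-go/endpoints"
)

// staticResolver pins every request to one URL regardless of region or
// FIPS/dual-stack flags; a production resolver would honor params.
type staticResolver struct{ endpoint string }

func (r staticResolver) ResolveEndpoint(ctx context.Context, params sts.EndpointParameters) (smithyendpoints.Endpoint, error) {
	u, err := url.Parse(r.endpoint)
	if err != nil {
		return smithyendpoints.Endpoint{}, fmt.Errorf("failed to parse uri: %s", r.endpoint)
	}
	return smithyendpoints.Endpoint{URI: *u}, nil
}

func main() {
	client := sts.New(sts.Options{
		Region:             "us-east-1",
		EndpointResolverV2: staticResolver{endpoint: "https://sts.amazonaws.com"},
	})
	_ = client // wire into real operations as needed
}
```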
-func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseDualStack := *params.UseDualStack - _UseFIPS := *params.UseFIPS - _UseGlobalEndpoint := *params.UseGlobalEndpoint - - if _UseGlobalEndpoint == true { - if !(params.Endpoint != nil) { - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == false { - if _UseDualStack == false { - if _Region == "ap-northeast-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "ap-south-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "ap-southeast-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "ap-southeast-2" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - 
smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "aws-global" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "ca-central-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-central-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-north-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-west-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - 
smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-west-2" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-west-3" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "sa-east-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "us-east-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "us-east-2" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - 
smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "us-west-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "us-west-2" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") - } - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") - } - uriString := _Endpoint - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == true { - if _UseDualStack == true { - if true == _PartitionResult.SupportsFIPS { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var 
out strings.Builder - out.WriteString("https://sts-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") - } - } - if _UseFIPS == true { - if _PartitionResult.SupportsFIPS == true { - if _PartitionResult.Name == "aws-us-gov" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") - } - if _UseDualStack == true { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") - } - if _Region == "aws-global" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json deleted file mode 100644 index 6b6e839e6c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", - "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", - "github.com/aws/smithy-go": "v1.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_AssumeRole.go", - "api_op_AssumeRoleWithSAML.go", - "api_op_AssumeRoleWithWebIdentity.go", - "api_op_DecodeAuthorizationMessage.go", - "api_op_GetAccessKeyInfo.go", - "api_op_GetCallerIdentity.go", - 
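Editorial note: the resolveBaseEndpoint helper deleted earlier gives the service-specific AWS_ENDPOINT_URL_STS variable precedence over the global AWS_ENDPOINT_URL. A simplified sketch of that precedence; the real helper additionally consults shared config sources, which is omitted here:

```go
package main

import (
	"fmt"
	"os"
)

// stsEndpointFromEnv sketches the documented precedence: the
// service-specific variable wins over the global one.
func stsEndpointFromEnv() (string, bool) {
	if v, ok := os.LookupEnv("AWS_ENDPOINT_URL_STS"); ok {
		return v, true
	}
	if v, ok := os.LookupEnv("AWS_ENDPOINT_URL"); ok {
		return v, true
	}
	return "", false
}

func main() {
	os.Setenv("AWS_ENDPOINT_URL", "http://localhost:4566")
	os.Setenv("AWS_ENDPOINT_URL_STS", "http://localhost:4567")
	fmt.Println(stsEndpointFromEnv()) // http://localhost:4567 true
}
```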
"api_op_GetFederationToken.go", - "api_op_GetSessionToken.go", - "auth.go", - "deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - "protocol_test.go", - "serializers.go", - "snapshot_test.go", - "types/errors.go", - "types/types.go", - "validators.go" - ], - "go": "1.15", - "module": "github.com/aws/aws-sdk-go-v2/service/sts", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go deleted file mode 100644 index 8bba9b7dc1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package sts - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.28.5" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go deleted file mode 100644 index 3dbd993b54..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go +++ /dev/null @@ -1,512 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package endpoints - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. - UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
- UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver STS endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "sts.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "sts-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "af-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-south-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - }: endpoints.Endpoint{}, - 
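Editorial note: the partition table is keyed off the region-name regexes quoted above. A quick check that, for example, us-gov regions fall outside the "aws" partition pattern and into "aws-us-gov":

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same patterns as partitionRegexp.Aws and partitionRegexp.AwsUsGov
	// in the deleted file (raw strings instead of escaped literals).
	aws := regexp.MustCompile(`^(us|eu|ap|sa|ca|me|af|il)-\w+-\d+$`)
	usGov := regexp.MustCompile(`^us-gov-\w+-\d+$`)
	for _, r := range []string{"eu-central-2", "us-gov-west-1", "aws-global"} {
		// eu-central-2 -> aws; us-gov-west-1 -> aws-us-gov;
		// aws-global matches neither and is handled by an explicit entry.
		fmt.Println(r, aws.MatchString(r), usGov.MatchString(r))
	}
}
```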
endpoints.EndpointKey{ - Region: "ap-southeast-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "aws-global", - }: endpoints.Endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ca-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-central-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-north-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-south-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "il-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "me-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "me-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "sa-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.us-east-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-east-1-fips", - }: endpoints.Endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-east-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.us-east-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-east-2-fips", - }: endpoints.Endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.us-west-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-west-1-fips", - }: endpoints.Endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-west-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.us-west-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-west-2-fips", - }: endpoints.Endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - Deprecated: aws.TrueTernary, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "sts.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: 
"sts-fips.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "sts-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "cn-north-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-iso-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-iso-west-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-isob-east-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-iso-e", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - }, - { - ID: "aws-us-gov", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "sts.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "sts-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.amazonaws.com", - 
Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-gov-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-gov-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts.us-gov-east-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-gov-east-1-fips", - }: endpoints.Endpoint{ - Hostname: "sts.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts.us-gov-west-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1-fips", - }: endpoints.Endpoint{ - Hostname: "sts.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: aws.TrueTernary, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go deleted file mode 100644 index 5c1be79f8c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go +++ /dev/null @@ -1,217 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. 
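The variant hostnames above (`sts-fips.{region}.amazonaws.com`, `sts.{region}.api.aws`, and so on) are what endpoint resolution selects when a caller opts into FIPS or dual-stack endpoints. A minimal sketch, assuming the standard aws-sdk-go-v2 `config` and `sts` packages and an arbitrary region:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	// Opting into the FIPS variant steers resolution toward the
	// sts-fips.{region}.amazonaws.com hostnames from the table above;
	// config.WithUseDualStackEndpoint would pick the .api.aws names instead.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion("us-east-1"),
		config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = sts.NewFromConfig(cfg)
}
```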
- EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. - AuthSchemes []smithyhttp.AuthScheme -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. 
Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go deleted file mode 100644 index 4c08061c0c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go +++ /dev/null @@ -1,862 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
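The deprecation comments above steer custom-endpoint users from `EndpointResolver` to `BaseEndpoint`. A rough sketch of that migration, with a made-up internal endpoint URL standing in for a real one:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// Instead of installing a deprecated EndpointResolver, set BaseEndpoint;
	// EndpointResolverV2 then applies its normal rules on top of it.
	// "https://sts.example.internal" is a placeholder, not a real endpoint.
	client := sts.NewFromConfig(cfg, func(o *sts.Options) {
		o.BaseEndpoint = aws.String("https://sts.example.internal")
	})
	_ = client
}
```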
- -package sts - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/protocol/query" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "path" -) - -type awsAwsquery_serializeOpAssumeRole struct { -} - -func (*awsAwsquery_serializeOpAssumeRole) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AssumeRoleInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("AssumeRole") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentAssumeRoleInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpAssumeRoleWithSAML struct { -} - -func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - 
request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("AssumeRoleWithSAML") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpAssumeRoleWithWebIdentity struct { -} - -func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("AssumeRoleWithWebIdentity") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = 
httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpDecodeAuthorizationMessage struct { -} - -func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("DecodeAuthorizationMessage") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpGetAccessKeyInfo struct { -} - -func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetAccessKeyInfoInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("GetAccessKeyInfo") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpGetCallerIdentity struct { -} - -func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetCallerIdentityInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("GetCallerIdentity") - body.Key("Version").String("2011-06-15") - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpGetFederationToken struct { -} - -func (*awsAwsquery_serializeOpGetFederationToken) ID() string { - return "OperationSerializer" -} - 
-func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetFederationTokenInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("GetFederationToken") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentGetFederationTokenInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpGetSessionToken struct { -} - -func (*awsAwsquery_serializeOpGetSessionToken) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetSessionTokenInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - 
bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("GetSessionToken") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentGetSessionTokenInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - return next.HandleSerialize(ctx, in) -} -func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { - array := value.Array("member") - - for i := range v { - av := array.Value() - if err := awsAwsquery_serializeDocumentPolicyDescriptorType(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptorType, value query.Value) error { - object := value.Object() - _ = object - - if v.Arn != nil { - objectKey := object.Key("arn") - objectKey.String(*v.Arn) - } - - return nil -} - -func awsAwsquery_serializeDocumentProvidedContext(v *types.ProvidedContext, value query.Value) error { - object := value.Object() - _ = object - - if v.ContextAssertion != nil { - objectKey := object.Key("ContextAssertion") - objectKey.String(*v.ContextAssertion) - } - - if v.ProviderArn != nil { - objectKey := object.Key("ProviderArn") - objectKey.String(*v.ProviderArn) - } - - return nil -} - -func awsAwsquery_serializeDocumentProvidedContextsListType(v []types.ProvidedContext, value query.Value) error { - array := value.Array("member") - - for i := range v { - av := array.Value() - if err := awsAwsquery_serializeDocumentProvidedContext(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error { - object := value.Object() - _ = object - - if v.Key != nil { - objectKey := object.Key("Key") - objectKey.String(*v.Key) - } - - if v.Value != nil { - objectKey := object.Key("Value") - objectKey.String(*v.Value) - } - - return nil -} - -func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error { - array := value.Array("member") - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error { - array := value.Array("member") - - for i := range v { - av := array.Value() - if err := awsAwsquery_serializeDocumentTag(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.ExternalId != nil { - objectKey := object.Key("ExternalId") - objectKey.String(*v.ExternalId) - } - - if v.Policy != nil { - objectKey := object.Key("Policy") - objectKey.String(*v.Policy) - } - - if v.PolicyArns != nil { - objectKey := object.Key("PolicyArns") - if err := 
awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { - return err - } - } - - if v.ProvidedContexts != nil { - objectKey := object.Key("ProvidedContexts") - if err := awsAwsquery_serializeDocumentProvidedContextsListType(v.ProvidedContexts, objectKey); err != nil { - return err - } - } - - if v.RoleArn != nil { - objectKey := object.Key("RoleArn") - objectKey.String(*v.RoleArn) - } - - if v.RoleSessionName != nil { - objectKey := object.Key("RoleSessionName") - objectKey.String(*v.RoleSessionName) - } - - if v.SerialNumber != nil { - objectKey := object.Key("SerialNumber") - objectKey.String(*v.SerialNumber) - } - - if v.SourceIdentity != nil { - objectKey := object.Key("SourceIdentity") - objectKey.String(*v.SourceIdentity) - } - - if v.Tags != nil { - objectKey := object.Key("Tags") - if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { - return err - } - } - - if v.TokenCode != nil { - objectKey := object.Key("TokenCode") - objectKey.String(*v.TokenCode) - } - - if v.TransitiveTagKeys != nil { - objectKey := object.Key("TransitiveTagKeys") - if err := awsAwsquery_serializeDocumentTagKeyListType(v.TransitiveTagKeys, objectKey); err != nil { - return err - } - } - - return nil -} - -func awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.Policy != nil { - objectKey := object.Key("Policy") - objectKey.String(*v.Policy) - } - - if v.PolicyArns != nil { - objectKey := object.Key("PolicyArns") - if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { - return err - } - } - - if v.PrincipalArn != nil { - objectKey := object.Key("PrincipalArn") - objectKey.String(*v.PrincipalArn) - } - - if v.RoleArn != nil { - objectKey := object.Key("RoleArn") - objectKey.String(*v.RoleArn) - } - - if v.SAMLAssertion != nil { - objectKey := object.Key("SAMLAssertion") - objectKey.String(*v.SAMLAssertion) - } - - return nil -} - -func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.Policy != nil { - objectKey := object.Key("Policy") - objectKey.String(*v.Policy) - } - - if v.PolicyArns != nil { - objectKey := object.Key("PolicyArns") - if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { - return err - } - } - - if v.ProviderId != nil { - objectKey := object.Key("ProviderId") - objectKey.String(*v.ProviderId) - } - - if v.RoleArn != nil { - objectKey := object.Key("RoleArn") - objectKey.String(*v.RoleArn) - } - - if v.RoleSessionName != nil { - objectKey := object.Key("RoleSessionName") - objectKey.String(*v.RoleSessionName) - } - - if v.WebIdentityToken != nil { - objectKey := object.Key("WebIdentityToken") - objectKey.String(*v.WebIdentityToken) - } - - return nil -} - -func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error { - object := value.Object() - _ = object - - if v.EncodedMessage != nil { - objectKey := object.Key("EncodedMessage") - objectKey.String(*v.EncodedMessage) - } - - return nil -} - 
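All of the generated serializers above follow one template: POST to the operation path, form-encode the body, and set the `Action` and `Version` keys before the operation's own members. A hand-rolled, unsigned approximation of the resulting request, using only the standard library (placeholder role ARN; a real call is additionally SigV4-signed by the middleware stack):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Approximates what awsAwsquery_serializeOpAssumeRole produces.
	form := url.Values{}
	form.Set("Action", "AssumeRole")
	form.Set("Version", "2011-06-15")
	form.Set("RoleArn", "arn:aws:iam::123456789012:role/example") // placeholder
	form.Set("RoleSessionName", "demo")

	req, err := http.NewRequest(http.MethodPost,
		"https://sts.us-east-1.amazonaws.com/", strings.NewReader(form.Encode()))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	fmt.Println(req.Method, req.URL.String(), "->", form.Encode())
}
```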
-func awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(v *GetAccessKeyInfoInput, value query.Value) error { - object := value.Object() - _ = object - - if v.AccessKeyId != nil { - objectKey := object.Key("AccessKeyId") - objectKey.String(*v.AccessKeyId) - } - - return nil -} - -func awsAwsquery_serializeOpDocumentGetCallerIdentityInput(v *GetCallerIdentityInput, value query.Value) error { - object := value.Object() - _ = object - - return nil -} - -func awsAwsquery_serializeOpDocumentGetFederationTokenInput(v *GetFederationTokenInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.Name != nil { - objectKey := object.Key("Name") - objectKey.String(*v.Name) - } - - if v.Policy != nil { - objectKey := object.Key("Policy") - objectKey.String(*v.Policy) - } - - if v.PolicyArns != nil { - objectKey := object.Key("PolicyArns") - if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { - return err - } - } - - if v.Tags != nil { - objectKey := object.Key("Tags") - if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { - return err - } - } - - return nil -} - -func awsAwsquery_serializeOpDocumentGetSessionTokenInput(v *GetSessionTokenInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.SerialNumber != nil { - objectKey := object.Key("SerialNumber") - objectKey.String(*v.SerialNumber) - } - - if v.TokenCode != nil { - objectKey := object.Key("TokenCode") - objectKey.String(*v.TokenCode) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go deleted file mode 100644 index 097875b279..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go +++ /dev/null @@ -1,244 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// The web identity token that was passed is expired or is not valid. Get a new -// identity token from the identity provider and then retry the request. -type ExpiredTokenException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ExpiredTokenException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ExpiredTokenException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ExpiredTokenException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ExpiredTokenException" - } - return *e.ErrorCodeOverride -} -func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The request could not be fulfilled because the identity provider (IDP) that was -// asked to verify the incoming identity token could not be reached. This is often -// a transient error caused by network conditions. Retry the request a limited -// number of times so that you don't exceed the request rate. If the error -// persists, the identity provider might be down or not responding. 
-type IDPCommunicationErrorException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *IDPCommunicationErrorException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *IDPCommunicationErrorException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *IDPCommunicationErrorException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "IDPCommunicationError" - } - return *e.ErrorCodeOverride -} -func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The identity provider (IdP) reported that authentication failed. This might be -// because the claim is invalid. If this error is returned for the -// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired -// or has been explicitly revoked. -type IDPRejectedClaimException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *IDPRejectedClaimException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *IDPRejectedClaimException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *IDPRejectedClaimException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "IDPRejectedClaim" - } - return *e.ErrorCodeOverride -} -func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. -type InvalidAuthorizationMessageException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidAuthorizationMessageException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidAuthorizationMessageException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidAuthorizationMessageException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidAuthorizationMessageException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The web identity token that was passed could not be validated by Amazon Web -// Services. Get a new identity token from the identity provider and then retry the -// request. -type InvalidIdentityTokenException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidIdentityTokenException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidIdentityTokenException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidIdentityTokenException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidIdentityToken" - } - return *e.ErrorCodeOverride -} -func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. 
-type MalformedPolicyDocumentException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *MalformedPolicyDocumentException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *MalformedPolicyDocumentException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *MalformedPolicyDocumentException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "MalformedPolicyDocument" - } - return *e.ErrorCodeOverride -} -func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session tags -// into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper size -// limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You could receive this error even though you meet other -// defined session policy and session tag limits. For more information, see IAM -// and STS Entity Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -type PackedPolicyTooLargeException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *PackedPolicyTooLargeException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *PackedPolicyTooLargeException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *PackedPolicyTooLargeException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "PackedPolicyTooLarge" - } - return *e.ErrorCodeOverride -} -func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. 
-type RegionDisabledException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RegionDisabledException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RegionDisabledException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RegionDisabledException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RegionDisabledException" - } - return *e.ErrorCodeOverride -} -func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go deleted file mode 100644 index e3701d11d1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go +++ /dev/null @@ -1,134 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" - "time" -) - -// The identifiers for the temporary security credentials that the operation -// returns. -type AssumedRoleUser struct { - - // The ARN of the temporary security credentials that are returned from the - // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. - // - // This member is required. - Arn *string - - // A unique identifier that contains the role ID and the role session name of the - // role that is being assumed. The role ID is generated by Amazon Web Services when - // the role is created. - // - // This member is required. - AssumedRoleId *string - - noSmithyDocumentSerde -} - -// Amazon Web Services credentials for API authentication. -type Credentials struct { - - // The access key ID that identifies the temporary security credentials. - // - // This member is required. - AccessKeyId *string - - // The date on which the current credentials expire. - // - // This member is required. - Expiration *time.Time - - // The secret access key that can be used to sign requests. - // - // This member is required. - SecretAccessKey *string - - // The token that users must pass to the service API to use the temporary - // credentials. - // - // This member is required. - SessionToken *string - - noSmithyDocumentSerde -} - -// Identifiers for the federated user that is associated with the credentials. -type FederatedUser struct { - - // The ARN that specifies the federated user that is associated with the - // credentials. For more information about ARNs and how to use them in policies, - // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. - // - // This member is required. - Arn *string - - // The string that identifies the federated user associated with the credentials, - // similar to the unique ID of an IAM user. - // - // This member is required. - FederatedUserId *string - - noSmithyDocumentSerde -} - -// A reference to the IAM managed policy that is passed as a session policy for a -// role session or a federated user session. -type PolicyDescriptorType struct { - - // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session - // policy for the role. 
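The modeled exception types above can be matched with `errors.As` rather than string comparison. A minimal sketch, assuming default credential resolution and an obviously invalid input:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	_, err = client.DecodeAuthorizationMessage(context.TODO(),
		&sts.DecodeAuthorizationMessageInput{
			EncodedMessage: aws.String("not-a-real-message"), // deliberately invalid
		})

	// Unwraps to the typed error defined in the deleted errors.go above.
	var invalid *types.InvalidAuthorizationMessageException
	if errors.As(err, &invalid) {
		fmt.Println("bad message:", invalid.ErrorMessage())
	}
}
```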
For more information about ARNs, see Amazon Resource Names - // (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. - Arn *string - - noSmithyDocumentSerde -} - -// Contains information about the provided context. This includes the signed and -// encrypted trusted context assertion and the context provider ARN from which the -// trusted context assertion was generated. -type ProvidedContext struct { - - // The signed and encrypted trusted context assertion generated by the context - // provider. The trusted context assertion is signed and encrypted by Amazon Web - // Services STS. - ContextAssertion *string - - // The context provider ARN from which the trusted context assertion was generated. - ProviderArn *string - - noSmithyDocumentSerde -} - -// You can pass custom key-value pair attributes when you assume a role or -// federate a user. These are called session tags. You can then use the session -// tags to control access to resources. For more information, see Tagging Amazon -// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -type Tag struct { - - // The key for a session tag. You can pass up to 50 session tags. The plain text - // session tag keys can’t exceed 128 characters. For these and additional limits, - // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // This member is required. - Key *string - - // The value for a session tag. You can pass up to 50 session tags. The plain text - // session tag values can’t exceed 256 characters. For these and additional limits, - // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // This member is required. - Value *string - - noSmithyDocumentSerde -} - -type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go deleted file mode 100644 index 3e4bad2a92..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go +++ /dev/null @@ -1,305 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
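The `AssumedRoleUser` and `Credentials` shapes above are what an `AssumeRole` call returns. A short usage sketch with a placeholder role ARN:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.AssumeRole(context.TODO(), &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName: aws.String("demo-session"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// out.AssumedRoleUser and out.Credentials are the types defined above.
	fmt.Println(*out.AssumedRoleUser.Arn, "expires", out.Credentials.Expiration.UTC())
}
```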
- -package sts - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpAssumeRole struct { -} - -func (*validateOpAssumeRole) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAssumeRole) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AssumeRoleInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAssumeRoleInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpAssumeRoleWithSAML struct { -} - -func (*validateOpAssumeRoleWithSAML) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAssumeRoleWithSAML) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAssumeRoleWithSAMLInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpAssumeRoleWithWebIdentity struct { -} - -func (*validateOpAssumeRoleWithWebIdentity) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAssumeRoleWithWebIdentityInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDecodeAuthorizationMessage struct { -} - -func (*validateOpDecodeAuthorizationMessage) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDecodeAuthorizationMessage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDecodeAuthorizationMessageInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetAccessKeyInfo struct { -} - -func (*validateOpGetAccessKeyInfo) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetAccessKeyInfo) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetAccessKeyInfoInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetAccessKeyInfoInput(input); err != nil { - return out, metadata, err - } - return 
next.HandleInitialize(ctx, in) -} - -type validateOpGetFederationToken struct { -} - -func (*validateOpGetFederationToken) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetFederationToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetFederationTokenInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetFederationTokenInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func addOpAssumeRoleValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After) -} - -func addOpAssumeRoleWithSAMLValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAssumeRoleWithSAML{}, middleware.After) -} - -func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After) -} - -func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After) -} - -func addOpGetAccessKeyInfoValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetAccessKeyInfo{}, middleware.After) -} - -func addOpGetFederationTokenValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetFederationToken{}, middleware.After) -} - -func validateTag(v *types.Tag) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Tag"} - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.Value == nil { - invalidParams.Add(smithy.NewErrParamRequired("Value")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTagListType(v []types.Tag) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TagListType"} - for i := range v { - if err := validateTag(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAssumeRoleInput(v *AssumeRoleInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleInput"} - if v.RoleArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) - } - if v.RoleSessionName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) - } - if v.Tags != nil { - if err := validateTagListType(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithSAMLInput"} - if v.RoleArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) - } - if v.PrincipalArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn")) - } - if v.SAMLAssertion == nil { - 
invalidParams.Add(smithy.NewErrParamRequired("SAMLAssertion")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithWebIdentityInput"} - if v.RoleArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) - } - if v.RoleSessionName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) - } - if v.WebIdentityToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("WebIdentityToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DecodeAuthorizationMessageInput"} - if v.EncodedMessage == nil { - invalidParams.Add(smithy.NewErrParamRequired("EncodedMessage")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetAccessKeyInfoInput(v *GetAccessKeyInfoInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetAccessKeyInfoInput"} - if v.AccessKeyId == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetFederationTokenInput(v *GetFederationTokenInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetFederationTokenInput"} - if v.Name == nil { - invalidParams.Add(smithy.NewErrParamRequired("Name")) - } - if v.Tags != nil { - if err := validateTagListType(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore deleted file mode 100644 index 2518b34915..0000000000 --- a/vendor/github.com/aws/smithy-go/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -# Eclipse -.classpath -.project -.settings/ - -# Intellij -.idea/ -*.iml -*.iws - -# Mac -.DS_Store - -# Maven -target/ -**/dependency-reduced-pom.xml - -# Gradle -/.gradle -build/ -*/out/ -*/*/out/ - -# VS Code -bin/ -.vscode/ - -# make -c.out diff --git a/vendor/github.com/aws/smithy-go/.travis.yml b/vendor/github.com/aws/smithy-go/.travis.yml deleted file mode 100644 index f8d1035cc3..0000000000 --- a/vendor/github.com/aws/smithy-go/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go -sudo: true -dist: bionic - -branches: - only: - - main - -os: - - linux - - osx - # Travis doesn't work with windows and Go tip - #- windows - -go: - - tip - -matrix: - allow_failures: - - go: tip - -before_install: - - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi - - (cd /tmp/; go get golang.org/x/lint/golint) - -script: - - make go test -v ./...; - diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md deleted file mode 100644 index b8d6561a4e..0000000000 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ /dev/null @@ -1,229 +0,0 @@ -# Release (2024-02-21) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.20.1 - * **Bug Fix**: 
Remove runtime dependency on go-cmp. - -# Release (2024-02-13) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.20.0 - * **Feature**: Add codegen definition for sigv4a trait. - * **Feature**: Bump minimum Go version to 1.20 per our language support policy. - -# Release (2023-12-07) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.19.0 - * **Feature**: Support modeled request compression. - -# Release (2023-11-30) - -* No change notes available for this release. - -# Release (2023-11-29) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.18.0 - * **Feature**: Expose Options() method on generated service clients. - -# Release (2023-11-15) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.17.0 - * **Feature**: Support identity/auth components of client reference architecture. - -# Release (2023-10-31) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.16.0 - * **Feature**: **LANG**: Bump minimum go version to 1.19. - -# Release (2023-10-06) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.15.0 - * **Feature**: Add `http.WithHeaderComment` middleware. - -# Release (2023-08-18) - -* No change notes available for this release. - -# Release (2023-08-07) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.14.1 - * **Bug Fix**: Prevent duplicated error returns in EndpointResolverV2 default implementation. - -# Release (2023-07-31) - -## General Highlights -* **Feature**: Adds support for smithy-modeled endpoint resolution. - -# Release (2022-12-02) - -* No change notes available for this release. - -# Release (2022-10-24) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.13.4 - * **Bug Fix**: fixed document type checking for encoding nested types - -# Release (2022-09-14) - -* No change notes available for this release. - -# Release (v1.13.2) - -* No change notes available for this release. - -# Release (v1.13.1) - -* No change notes available for this release. - -# Release (v1.13.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.13.0 - * **Feature**: Adds support for the Smithy httpBearerAuth authentication trait to smithy-go. This allows the SDK to support the bearer authentication flow for API operations decorated with httpBearerAuth. An API client will need to be provided with its own bearer.TokenProvider implementation or use the bearer.StaticTokenProvider implementation. - -# Release (v1.12.1) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.12.1 - * **Bug Fix**: Fixes a bug where JSON object keys were not escaped. - -# Release (v1.12.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.12.0 - * **Feature**: `transport/http`: Add utility for setting context metadata when operation serializer automatically assigns content-type default value. - -# Release (v1.11.3) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.11.3 - * **Dependency Update**: Updates smithy-go unit test dependency go-cmp to 0.5.8. - -# Release (v1.11.2) - -* No change notes available for this release. - -# Release (v1.11.1) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.11.1 - * **Bug Fix**: Updates the smithy-go HTTP Request to correctly handle building the request to an http.Request. 
Related to [aws/aws-sdk-go-v2#1583](https://github.com/aws/aws-sdk-go-v2/issues/1583) - -# Release (v1.11.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.11.0 - * **Feature**: Updates deserialization of header list to supported quoted strings - -# Release (v1.10.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.10.0 - * **Feature**: Add `ptr.Duration`, `ptr.ToDuration`, `ptr.DurationSlice`, `ptr.ToDurationSlice`, `ptr.DurationMap`, and `ptr.ToDurationMap` functions for the `time.Duration` type. - -# Release (v1.9.1) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.9.1 - * **Documentation**: Fixes various typos in Go package documentation. - -# Release (v1.9.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.9.0 - * **Feature**: sync: OnceErr, can be used to concurrently record a signal when an error has occurred. - * **Bug Fix**: `transport/http`: CloseResponseBody and ErrorCloseResponseBody middleware have been updated to ensure that the body is fully drained before closing. - -# Release v1.8.1 - -### Smithy Go Module -* **Bug Fix**: Fixed an issue that would cause the HTTP Content-Length to be set to 0 if the stream body was not set. - * Fixes [aws/aws-sdk-go-v2#1418](https://github.com/aws/aws-sdk-go-v2/issues/1418) - -# Release v1.8.0 - -### Smithy Go Module - -* `time`: Add support for parsing additional DateTime timestamp format ([#324](https://github.com/aws/smithy-go/pull/324)) - * Adds support for parsing DateTime timestamp formatted time similar to RFC 3339, but without the `Z` character, nor UTC offset. - * Fixes [#1387](https://github.com/aws/aws-sdk-go-v2/issues/1387) - -# Release v1.7.0 - -### Smithy Go Module -* `ptr`: Handle error for deferred file close call ([#314](https://github.com/aws/smithy-go/pull/314)) - * Handle error for defer close call -* `middleware`: Add Clone to Metadata ([#318](https://github.com/aws/smithy-go/pull/318)) - * Adds a new Clone method to the middleware Metadata type. This provides a shallow clone of the entries in the Metadata. -* `document`: Add new package for document shape serialization support ([#310](https://github.com/aws/smithy-go/pull/310)) - -### Codegen -* Add Smithy Document Shape Support ([#310](https://github.com/aws/smithy-go/pull/310)) - * Adds support for Smithy Document shapes and supporting types for protocols to implement support - -# Release v1.6.0 (2021-07-15) - -### Smithy Go Module -* `encoding/httpbinding`: Support has been added for encoding `float32` and `float64` values that are `NaN`, `Infinity`, or `-Infinity`. ([#316](https://github.com/aws/smithy-go/pull/316)) - -### Codegen -* Adds support for handling `float32` and `float64` `NaN` values in HTTP Protocol Unit Tests. ([#316](https://github.com/aws/smithy-go/pull/316)) -* Adds support protocol generator implementations to override the error code string returned by `ErrorCode` methods on generated error types. ([#315](https://github.com/aws/smithy-go/pull/315)) - -# Release v1.5.0 (2021-06-25) - -### Smithy Go module -* `time`: Update time parsing to not be as strict for HTTPDate and DateTime ([#307](https://github.com/aws/smithy-go/pull/307)) - * Fixes [#302](https://github.com/aws/smithy-go/issues/302) by changing time to UTC before formatting so no local offset time is lost. 
- -### Codegen -* Adds support for integrating client members via plugins ([#301](https://github.com/aws/smithy-go/pull/301)) -* Fix serialization of enum types marked with payload trait ([#296](https://github.com/aws/smithy-go/pull/296)) -* Update generation of API client modules to include a manifest of files generated ([#283](https://github.com/aws/smithy-go/pull/283)) -* Update Group Java group ID for smithy-go generator ([#298](https://github.com/aws/smithy-go/pull/298)) -* Support the delegation of determining the errors that can occur for an operation ([#304](https://github.com/aws/smithy-go/pull/304)) -* Support for marking and documenting deprecated client config fields. ([#303](https://github.com/aws/smithy-go/pull/303)) - -# Release v1.4.0 (2021-05-06) - -### Smithy Go module -* `encoding/xml`: Fix escaping of Next Line and Line Start in XML Encoder ([#267](https://github.com/aws/smithy-go/pull/267)) - -### Codegen -* Add support for Smithy 1.7 ([#289](https://github.com/aws/smithy-go/pull/289)) -* Add support for httpQueryParams location -* Add support for model renaming conflict resolution with service closure - -# Release v1.3.1 (2021-04-08) - -### Smithy Go module -* `transport/http`: Loosen endpoint hostname validation to allow specifying port numbers. ([#279](https://github.com/aws/smithy-go/pull/279)) -* `io`: Fix RingBuffer panics due to out of bounds index. ([#282](https://github.com/aws/smithy-go/pull/282)) - -# Release v1.3.0 (2021-04-01) - -### Smithy Go module -* `transport/http`: Add utility to safely join string to url path, and url raw query. - -### Codegen -* Update HttpBindingProtocolGenerator to use http/transport JoinPath and JoinQuery utility. - -# Release v1.2.0 (2021-03-12) - -### Smithy Go module -* Fix support for parsing shortened year format in HTTP Date header. -* Fix GitHub APIDiff action workflow to get gorelease tool correctly. -* Fix codegen artifact unit test for Go 1.16 - -### Codegen -* Fix generating paginator nil parameter handling before usage. -* Fix Serialize unboxed members decorated as required. -* Add ability to define resolvers at both client construction and operation invocation. -* Support for extending paginators with custom runtime trait diff --git a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md deleted file mode 100644 index 5b627cfa60..0000000000 --- a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,4 +0,0 @@ -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md deleted file mode 100644 index c4b6a1c508..0000000000 --- a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md +++ /dev/null @@ -1,59 +0,0 @@ -# Contributing Guidelines - -Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional -documentation, we greatly value feedback and contributions from our community. 
- -Please read through this document before submitting any issues or pull requests to ensure we have all the necessary -information to effectively respond to your bug report or contribution. - - -## Reporting Bugs/Feature Requests - -We welcome you to use the GitHub issue tracker to report bugs or suggest features. - -When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already -reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: - -* A reproducible test case or series of steps -* The version of our code being used -* Any modifications you've made relevant to the bug -* Anything unusual about your environment or deployment - - -## Contributing via Pull Requests -Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: - -1. You are working against the latest source on the *main* branch. -2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. -3. You open an issue to discuss any significant work - we would hate for your time to be wasted. - -To send us a pull request, please: - -1. Fork the repository. -2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. -3. Ensure local tests pass. -4. Commit to your fork using clear commit messages. -5. Send us a pull request, answering any default questions in the pull request interface. -6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. - -GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and -[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). - - -## Finding contributions to work on -Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. - - -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. - - -## Security issue notifications -If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. - - -## Licensing - -See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. diff --git a/vendor/github.com/aws/smithy-go/LICENSE b/vendor/github.com/aws/smithy-go/LICENSE deleted file mode 100644 index 67db858821..0000000000 --- a/vendor/github.com/aws/smithy-go/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile deleted file mode 100644 index e66fa8cace..0000000000 --- a/vendor/github.com/aws/smithy-go/Makefile +++ /dev/null @@ -1,102 +0,0 @@ -PRE_RELEASE_VERSION ?= - -RELEASE_MANIFEST_FILE ?= -RELEASE_CHGLOG_DESC_FILE ?= - -REPOTOOLS_VERSION ?= latest -REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools -REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS ?= -REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION} - -UNIT_TEST_TAGS= -BUILD_TAGS= - -ifneq ($(PRE_RELEASE_VERSION),) - REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} -endif - -smithy-publish-local: - cd codegen && ./gradlew publishToMavenLocal - -smithy-build: - cd codegen && ./gradlew build - -smithy-clean: - cd codegen && ./gradlew clean - -################## -# Linting/Verify # -################## -.PHONY: verify vet cover - -verify: vet - -vet: - go vet ${BUILD_TAGS} --all ./... - -cover: - go test ${BUILD_TAGS} -coverprofile c.out ./... - @cover=`go tool cover -func c.out | grep '^total:' | awk '{ print $$3+0 }'`; \ - echo "total (statements): $$cover%"; - -################ -# Unit Testing # -################ -.PHONY: unit unit-race unit-test unit-race-test - -unit: verify - go vet ${BUILD_TAGS} --all ./... && \ - go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ - go test -timeout=1m ${UNIT_TEST_TAGS} ./... - -unit-race: verify - go vet ${BUILD_TAGS} --all ./... && \ - go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ - go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... - -unit-test: verify - go test -timeout=1m ${UNIT_TEST_TAGS} ./... - -unit-race-test: verify - go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... 
- -##################### -# Release Process # -##################### -.PHONY: preview-release pre-release-validation release - -preview-release: - go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} - -pre-release-validation: - @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \ - echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \ - fi - @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \ - echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \ - fi - -release: pre-release-validation - go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} - go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE} - go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE} - go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE} - go run ${REPOTOOLS_CMD_CHANGELOG} rm -all - go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE} - -module-version: - @go run ${REPOTOOLS_CMD_MODULE_VERSION} . - -############## -# Repo Tools # -############## -.PHONY: install-changelog - -install-changelog: - go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} diff --git a/vendor/github.com/aws/smithy-go/NOTICE b/vendor/github.com/aws/smithy-go/NOTICE deleted file mode 100644 index 616fc58894..0000000000 --- a/vendor/github.com/aws/smithy-go/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md deleted file mode 100644 index c374f69283..0000000000 --- a/vendor/github.com/aws/smithy-go/README.md +++ /dev/null @@ -1,27 +0,0 @@ -## Smithy Go - -[![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml) - -[Smithy](https://smithy.io/) code generators for Go. - -**WARNING: All interfaces are subject to change.** - -## Can I use this? - -In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), -such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), -in order to generate transport mechanisms and serialization/deserialization -code ("serde") accordingly. - -The code generator does not currently support any protocols out of the box, -therefore the useability of this project on its own is currently limited. -Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) -exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are -tracking the movement of those out of the SDK into smithy-go in -[#458](https://github.com/aws/smithy-go/issues/458), but there's currently no -timeline for doing so. - -## License - -This project is licensed under the Apache-2.0 License. 
- diff --git a/vendor/github.com/aws/smithy-go/auth/auth.go b/vendor/github.com/aws/smithy-go/auth/auth.go deleted file mode 100644 index 5bdb70c9a7..0000000000 --- a/vendor/github.com/aws/smithy-go/auth/auth.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package auth defines protocol-agnostic authentication types for smithy -// clients. -package auth diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/docs.go b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go deleted file mode 100644 index 1c9b9715cb..0000000000 --- a/vendor/github.com/aws/smithy-go/auth/bearer/docs.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package bearer provides middleware and utilities for authenticating API -// operation calls with a Bearer Token. -package bearer diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go deleted file mode 100644 index 8c7d720995..0000000000 --- a/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go +++ /dev/null @@ -1,104 +0,0 @@ -package bearer - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Message is the middleware stack's request transport message value. -type Message interface{} - -// Signer provides an interface for implementations to decorate a request -// message with a bearer token. The signer is responsible for validating the -// message type is compatible with the signer. -type Signer interface { - SignWithBearerToken(context.Context, Token, Message) (Message, error) -} - -// AuthenticationMiddleware provides the Finalize middleware step for signing -// an request message with a bearer token. -type AuthenticationMiddleware struct { - signer Signer - tokenProvider TokenProvider -} - -// AddAuthenticationMiddleware helper adds the AuthenticationMiddleware to the -// middleware Stack in the Finalize step with the options provided. -func AddAuthenticationMiddleware(s *middleware.Stack, signer Signer, tokenProvider TokenProvider) error { - return s.Finalize.Add( - NewAuthenticationMiddleware(signer, tokenProvider), - middleware.After, - ) -} - -// NewAuthenticationMiddleware returns an initialized AuthenticationMiddleware. -func NewAuthenticationMiddleware(signer Signer, tokenProvider TokenProvider) *AuthenticationMiddleware { - return &AuthenticationMiddleware{ - signer: signer, - tokenProvider: tokenProvider, - } -} - -const authenticationMiddlewareID = "BearerTokenAuthentication" - -// ID returns the resolver identifier -func (m *AuthenticationMiddleware) ID() string { - return authenticationMiddlewareID -} - -// HandleFinalize implements the FinalizeMiddleware interface in order to -// update the request with bearer token authentication. 
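
The exported pieces of this removed bearer package compose directly. A hedged sketch using only the API shown here, where the `*middleware.Stack` is assumed to come from a generated client's operation stack, and `StaticTokenProvider`/`Token` come from the sibling token.go removed just below:

```go
package example

import (
	"github.com/aws/smithy-go/auth/bearer"
	"github.com/aws/smithy-go/middleware"
)

// addBearerAuth registers Finalize-step bearer signing on an operation's
// middleware stack, pairing the HTTPS-only signer with a fixed token.
func addBearerAuth(stack *middleware.Stack, token string) error {
	provider := bearer.StaticTokenProvider{Token: bearer.Token{Value: token}}
	// SignHTTPSMessage sets "Authorization: Bearer <token>" and refuses
	// requests whose URL scheme is not HTTPS.
	return bearer.AddAuthenticationMiddleware(stack, bearer.NewSignHTTPSMessage(), provider)
}
```
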
-func (m *AuthenticationMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - token, err := m.tokenProvider.RetrieveBearerToken(ctx) - if err != nil { - return out, metadata, fmt.Errorf("failed AuthenticationMiddleware wrap message, %w", err) - } - - signedMessage, err := m.signer.SignWithBearerToken(ctx, token, in.Request) - if err != nil { - return out, metadata, fmt.Errorf("failed AuthenticationMiddleware sign message, %w", err) - } - - in.Request = signedMessage - return next.HandleFinalize(ctx, in) -} - -// SignHTTPSMessage provides a bearer token authentication implementation that -// will sign the message with the provided bearer token. -// -// Will fail if the message is not a smithy-go HTTP request or the request is -// not HTTPS. -type SignHTTPSMessage struct{} - -// NewSignHTTPSMessage returns an initialized signer for HTTP messages. -func NewSignHTTPSMessage() *SignHTTPSMessage { - return &SignHTTPSMessage{} -} - -// SignWithBearerToken returns a copy of the HTTP request with the bearer token -// added via the "Authorization" header, per RFC 6750, https://datatracker.ietf.org/doc/html/rfc6750. -// -// Returns an error if the request's URL scheme is not HTTPS, or the request -// message is not an smithy-go HTTP Request pointer type. -func (SignHTTPSMessage) SignWithBearerToken(ctx context.Context, token Token, message Message) (Message, error) { - req, ok := message.(*smithyhttp.Request) - if !ok { - return nil, fmt.Errorf("expect smithy-go HTTP Request, got %T", message) - } - - if !req.IsHTTPS() { - return nil, fmt.Errorf("bearer token with HTTP request requires HTTPS") - } - - reqClone := req.Clone() - reqClone.Header.Set("Authorization", "Bearer "+token.Value) - - return reqClone, nil -} diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token.go b/vendor/github.com/aws/smithy-go/auth/bearer/token.go deleted file mode 100644 index be260d4c76..0000000000 --- a/vendor/github.com/aws/smithy-go/auth/bearer/token.go +++ /dev/null @@ -1,50 +0,0 @@ -package bearer - -import ( - "context" - "time" -) - -// Token provides a type wrapping a bearer token and expiration metadata. -type Token struct { - Value string - - CanExpire bool - Expires time.Time -} - -// Expired returns if the token's Expires time is before or equal to the time -// provided. If CanExpires is false, Expired will always return false. -func (t Token) Expired(now time.Time) bool { - if !t.CanExpire { - return false - } - now = now.Round(0) - return now.Equal(t.Expires) || now.After(t.Expires) -} - -// TokenProvider provides interface for retrieving bearer tokens. -type TokenProvider interface { - RetrieveBearerToken(context.Context) (Token, error) -} - -// TokenProviderFunc provides a helper utility to wrap a function as a type -// that implements the TokenProvider interface. -type TokenProviderFunc func(context.Context) (Token, error) - -// RetrieveBearerToken calls the wrapped function, returning the Token or -// error. -func (fn TokenProviderFunc) RetrieveBearerToken(ctx context.Context) (Token, error) { - return fn(ctx) -} - -// StaticTokenProvider provides a utility for wrapping a static bearer token -// value within an implementation of a token provider. -type StaticTokenProvider struct { - Token Token -} - -// RetrieveBearerToken returns the static token specified. 
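
For tokens minted at call time rather than fixed up front, `TokenProviderFunc` adapts a plain closure, and the `CanExpire`/`Expires` fields feed the `Expired` check defined above. A sketch; `mintToken` is a hypothetical stand-in for whatever actually issues the credential:

```go
package example

import (
	"context"
	"time"

	"github.com/aws/smithy-go/auth/bearer"
)

// mintToken is hypothetical: substitute a real token issuer.
func mintToken(ctx context.Context) (string, error) { return "example-token", nil }

// dynamicProvider adapts a closure via TokenProviderFunc and stamps expiry
// metadata so Token.Expired (and the TokenCache below) can act on it.
func dynamicProvider() bearer.TokenProvider {
	return bearer.TokenProviderFunc(func(ctx context.Context) (bearer.Token, error) {
		value, err := mintToken(ctx)
		if err != nil {
			return bearer.Token{}, err
		}
		return bearer.Token{
			Value:     value,
			CanExpire: true,
			Expires:   time.Now().Add(15 * time.Minute),
		}, nil
	})
}
```
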
-func (s StaticTokenProvider) RetrieveBearerToken(context.Context) (Token, error) { - return s.Token, nil -} diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go deleted file mode 100644 index 223ddf52bb..0000000000 --- a/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go +++ /dev/null @@ -1,208 +0,0 @@ -package bearer - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - smithycontext "github.com/aws/smithy-go/context" - "github.com/aws/smithy-go/internal/sync/singleflight" -) - -// package variable that can be override in unit tests. -var timeNow = time.Now - -// TokenCacheOptions provides a set of optional configuration options for the -// TokenCache TokenProvider. -type TokenCacheOptions struct { - // The duration before the token will expire when the credentials will be - // refreshed. If DisableAsyncRefresh is true, the RetrieveBearerToken calls - // will be blocking. - // - // Asynchronous refreshes are deduplicated, and only one will be in-flight - // at a time. If the token expires while an asynchronous refresh is in - // flight, the next call to RetrieveBearerToken will block on that refresh - // to return. - RefreshBeforeExpires time.Duration - - // The timeout the underlying TokenProvider's RetrieveBearerToken call must - // return within, or will be canceled. Defaults to 0, no timeout. - // - // If 0 timeout, its possible for the underlying tokenProvider's - // RetrieveBearerToken call to block forever. Preventing subsequent - // TokenCache attempts to refresh the token. - // - // If this timeout is reached all pending deduplicated calls to - // TokenCache RetrieveBearerToken will fail with an error. - RetrieveBearerTokenTimeout time.Duration - - // The minimum duration between asynchronous refresh attempts. If the next - // asynchronous recent refresh attempt was within the minimum delay - // duration, the call to retrieve will return the current cached token, if - // not expired. - // - // The asynchronous retrieve is deduplicated across multiple calls when - // RetrieveBearerToken is called. The asynchronous retrieve is not a - // periodic task. It is only performed when the token has not yet expired, - // and the current item is within the RefreshBeforeExpires window, and the - // TokenCache's RetrieveBearerToken method is called. - // - // If 0, (default) there will be no minimum delay between asynchronous - // refresh attempts. - // - // If DisableAsyncRefresh is true, this option is ignored. - AsyncRefreshMinimumDelay time.Duration - - // Sets if the TokenCache will attempt to refresh the token in the - // background asynchronously instead of blocking for credentials to be - // refreshed. If disabled token refresh will be blocking. - // - // The first call to RetrieveBearerToken will always be blocking, because - // there is no cached token. - DisableAsyncRefresh bool -} - -// TokenCache provides an utility to cache Bearer Authentication tokens from a -// wrapped TokenProvider. The TokenCache can be has options to configure the -// cache's early and asynchronous refresh of the token. -type TokenCache struct { - options TokenCacheOptions - provider TokenProvider - - cachedToken atomic.Value - lastRefreshAttemptTime atomic.Value - sfGroup singleflight.Group -} - -// NewTokenCache returns a initialized TokenCache that implements the -// TokenProvider interface. Wrapping the provider passed in. 
Also taking a set -// of optional functional option parameters to configure the token cache. -func NewTokenCache(provider TokenProvider, optFns ...func(*TokenCacheOptions)) *TokenCache { - var options TokenCacheOptions - for _, fn := range optFns { - fn(&options) - } - - return &TokenCache{ - options: options, - provider: provider, - } -} - -// RetrieveBearerToken returns the token if it could be obtained, or error if a -// valid token could not be retrieved. -// -// The passed in Context's cancel/deadline/timeout will impacting only this -// individual retrieve call and not any other already queued up calls. This -// means underlying provider's RetrieveBearerToken calls could block for ever, -// and not be canceled with the Context. Set RetrieveBearerTokenTimeout to -// provide a timeout, preventing the underlying TokenProvider blocking forever. -// -// By default, if the passed in Context is canceled, all of its values will be -// considered expired. The wrapped TokenProvider will not be able to lookup the -// values from the Context once it is expired. This is done to protect against -// expired values no longer being valid. To disable this behavior, use -// smithy-go's context.WithPreserveExpiredValues to add a value to the Context -// before calling RetrieveBearerToken to enable support for expired values. -// -// Without RetrieveBearerTokenTimeout there is the potential for a underlying -// Provider's RetrieveBearerToken call to sit forever. Blocking in subsequent -// attempts at refreshing the token. -func (p *TokenCache) RetrieveBearerToken(ctx context.Context) (Token, error) { - cachedToken, ok := p.getCachedToken() - if !ok || cachedToken.Expired(timeNow()) { - return p.refreshBearerToken(ctx) - } - - // Check if the token should be refreshed before it expires. - refreshToken := cachedToken.Expired(timeNow().Add(p.options.RefreshBeforeExpires)) - if !refreshToken { - return cachedToken, nil - } - - if p.options.DisableAsyncRefresh { - return p.refreshBearerToken(ctx) - } - - p.tryAsyncRefresh(ctx) - - return cachedToken, nil -} - -// tryAsyncRefresh attempts to asynchronously refresh the token returning the -// already cached token. If it AsyncRefreshMinimumDelay option is not zero, and -// the duration since the last refresh is less than that value, nothing will be -// done. -func (p *TokenCache) tryAsyncRefresh(ctx context.Context) { - if p.options.AsyncRefreshMinimumDelay != 0 { - var lastRefreshAttempt time.Time - if v := p.lastRefreshAttemptTime.Load(); v != nil { - lastRefreshAttempt = v.(time.Time) - } - - if timeNow().Before(lastRefreshAttempt.Add(p.options.AsyncRefreshMinimumDelay)) { - return - } - } - - // Ignore the returned channel so this won't be blocking, and limit the - // number of additional goroutines created. 
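
From a consumer's perspective, the cache documented above reduces to one constructor call around any `TokenProvider`. A sketch; the durations are illustrative choices, not package defaults:

```go
package example

import (
	"context"
	"time"

	"github.com/aws/smithy-go/auth/bearer"
)

// retrieveCached wraps the provider in a TokenCache and fetches a token.
func retrieveCached(ctx context.Context, provider bearer.TokenProvider) (bearer.Token, error) {
	cache := bearer.NewTokenCache(provider, func(o *bearer.TokenCacheOptions) {
		o.RefreshBeforeExpires = 5 * time.Minute        // refresh ahead of expiry
		o.RetrieveBearerTokenTimeout = 30 * time.Second // bound each underlying call
		o.AsyncRefreshMinimumDelay = time.Minute        // rate-limit background refreshes
	})
	return cache.RetrieveBearerToken(ctx)
}
```

Per the documentation above, the first call always blocks (nothing is cached yet); later calls inside the `RefreshBeforeExpires` window return the cached token while a deduplicated background refresh runs, unless `DisableAsyncRefresh` is set.
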
- p.sfGroup.DoChan("async-refresh", func() (interface{}, error) { - res, err := p.refreshBearerToken(ctx) - if p.options.AsyncRefreshMinimumDelay != 0 { - var refreshAttempt time.Time - if err != nil { - refreshAttempt = timeNow() - } - p.lastRefreshAttemptTime.Store(refreshAttempt) - } - - return res, err - }) -} - -func (p *TokenCache) refreshBearerToken(ctx context.Context) (Token, error) { - resCh := p.sfGroup.DoChan("refresh-token", func() (interface{}, error) { - ctx := smithycontext.WithSuppressCancel(ctx) - if v := p.options.RetrieveBearerTokenTimeout; v != 0 { - var cancel func() - ctx, cancel = context.WithTimeout(ctx, v) - defer cancel() - } - return p.singleRetrieve(ctx) - }) - - select { - case res := <-resCh: - return res.Val.(Token), res.Err - case <-ctx.Done(): - return Token{}, fmt.Errorf("retrieve bearer token canceled, %w", ctx.Err()) - } -} - -func (p *TokenCache) singleRetrieve(ctx context.Context) (interface{}, error) { - token, err := p.provider.RetrieveBearerToken(ctx) - if err != nil { - return Token{}, fmt.Errorf("failed to retrieve bearer token, %w", err) - } - - p.cachedToken.Store(&token) - return token, nil -} - -// getCachedToken returns the currently cached token and true if found. Returns -// false if no token is cached. -func (p *TokenCache) getCachedToken() (Token, bool) { - v := p.cachedToken.Load() - if v == nil { - return Token{}, false - } - - t := v.(*Token) - if t == nil || t.Value == "" { - return Token{}, false - } - - return *t, true -} diff --git a/vendor/github.com/aws/smithy-go/auth/identity.go b/vendor/github.com/aws/smithy-go/auth/identity.go deleted file mode 100644 index ba8cf70d4d..0000000000 --- a/vendor/github.com/aws/smithy-go/auth/identity.go +++ /dev/null @@ -1,47 +0,0 @@ -package auth - -import ( - "context" - "time" - - "github.com/aws/smithy-go" -) - -// Identity contains information that identifies who the user making the -// request is. -type Identity interface { - Expiration() time.Time -} - -// IdentityResolver defines the interface through which an Identity is -// retrieved. -type IdentityResolver interface { - GetIdentity(context.Context, smithy.Properties) (Identity, error) -} - -// IdentityResolverOptions defines the interface through which an entity can be -// queried to retrieve an IdentityResolver for a given auth scheme. -type IdentityResolverOptions interface { - GetIdentityResolver(schemeID string) IdentityResolver -} - -// AnonymousIdentity is a sentinel to indicate no identity. -type AnonymousIdentity struct{} - -var _ Identity = (*AnonymousIdentity)(nil) - -// Expiration returns the zero value for time, as anonymous identity never -// expires. -func (*AnonymousIdentity) Expiration() time.Time { - return time.Time{} -} - -// AnonymousIdentityResolver returns AnonymousIdentity. -type AnonymousIdentityResolver struct{} - -var _ IdentityResolver = (*AnonymousIdentityResolver)(nil) - -// GetIdentity returns AnonymousIdentity. 
-func (*AnonymousIdentityResolver) GetIdentity(_ context.Context, _ smithy.Properties) (Identity, error) { - return &AnonymousIdentity{}, nil -} diff --git a/vendor/github.com/aws/smithy-go/auth/option.go b/vendor/github.com/aws/smithy-go/auth/option.go deleted file mode 100644 index d5dabff04b..0000000000 --- a/vendor/github.com/aws/smithy-go/auth/option.go +++ /dev/null @@ -1,25 +0,0 @@ -package auth - -import "github.com/aws/smithy-go" - -type ( - authOptionsKey struct{} -) - -// Option represents a possible authentication method for an operation. -type Option struct { - SchemeID string - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -// GetAuthOptions gets auth Options from Properties. -func GetAuthOptions(p *smithy.Properties) ([]*Option, bool) { - v, ok := p.Get(authOptionsKey{}).([]*Option) - return v, ok -} - -// SetAuthOptions sets auth Options on Properties. -func SetAuthOptions(p *smithy.Properties, options []*Option) { - p.Set(authOptionsKey{}, options) -} diff --git a/vendor/github.com/aws/smithy-go/auth/scheme_id.go b/vendor/github.com/aws/smithy-go/auth/scheme_id.go deleted file mode 100644 index fb6a57c640..0000000000 --- a/vendor/github.com/aws/smithy-go/auth/scheme_id.go +++ /dev/null @@ -1,20 +0,0 @@ -package auth - -// Anonymous -const ( - SchemeIDAnonymous = "smithy.api#noAuth" -) - -// HTTP auth schemes -const ( - SchemeIDHTTPBasic = "smithy.api#httpBasicAuth" - SchemeIDHTTPDigest = "smithy.api#httpDigestAuth" - SchemeIDHTTPBearer = "smithy.api#httpBearerAuth" - SchemeIDHTTPAPIKey = "smithy.api#httpApiKeyAuth" -) - -// AWS auth schemes -const ( - SchemeIDSigV4 = "aws.auth#sigv4" - SchemeIDSigV4A = "aws.auth#sigv4a" -) diff --git a/vendor/github.com/aws/smithy-go/context/suppress_expired.go b/vendor/github.com/aws/smithy-go/context/suppress_expired.go deleted file mode 100644 index a39b84a278..0000000000 --- a/vendor/github.com/aws/smithy-go/context/suppress_expired.go +++ /dev/null @@ -1,81 +0,0 @@ -package context - -import "context" - -// valueOnlyContext provides a utility to preserve only the values of a -// Context. Suppressing any cancellation or deadline on that context being -// propagated downstream of this value. -// -// If preserveExpiredValues is false (default), and the valueCtx is canceled, -// calls to lookup values with the Values method, will always return nil. Setting -// preserveExpiredValues to true, will allow the valueOnlyContext to lookup -// values in valueCtx even if valueCtx is canceled. -// -// Based on the Go standard libraries net/lookup.go onlyValuesCtx utility. -// https://github.com/golang/go/blob/da2773fe3e2f6106634673a38dc3a6eb875fe7d8/src/net/lookup.go -type valueOnlyContext struct { - context.Context - - preserveExpiredValues bool - valuesCtx context.Context -} - -var _ context.Context = (*valueOnlyContext)(nil) - -// Value looks up the key, returning its value. If configured to not preserve -// values of expired context, and the wrapping context is canceled, nil will be -// returned. -func (v *valueOnlyContext) Value(key interface{}) interface{} { - if !v.preserveExpiredValues { - select { - case <-v.valuesCtx.Done(): - return nil - default: - } - } - - return v.valuesCtx.Value(key) -} - -// WithSuppressCancel wraps the Context value, suppressing its deadline and -// cancellation events being propagated downstream to consumer of the returned -// context. 
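
Tying together the `Option` bag and scheme-ID constants removed just above: resolved auth options travel in a `smithy.Properties` container. A sketch of setting them and reading them back:

```go
package example

import (
	"fmt"

	smithy "github.com/aws/smithy-go"
	"github.com/aws/smithy-go/auth"
)

// listSchemes advertises two candidate auth schemes, then enumerates them.
func listSchemes() {
	var props smithy.Properties
	auth.SetAuthOptions(&props, []*auth.Option{
		{SchemeID: auth.SchemeIDHTTPBearer},
		{SchemeID: auth.SchemeIDAnonymous},
	})

	if options, ok := auth.GetAuthOptions(&props); ok {
		for _, opt := range options {
			fmt.Println(opt.SchemeID) // smithy.api#httpBearerAuth, smithy.api#noAuth
		}
	}
}
```
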
-// -// By default the wrapped Context's Values are available downstream until the -// wrapped Context is canceled. Once the wrapped Context is canceled, Values -// method called on the context return will no longer lookup any key. As they -// are now considered expired. -// -// To override this behavior, use WithPreserveExpiredValues on the Context -// before it is wrapped by WithSuppressCancel. This will make the Context -// returned by WithSuppressCancel allow lookup of expired values. -func WithSuppressCancel(ctx context.Context) context.Context { - return &valueOnlyContext{ - Context: context.Background(), - valuesCtx: ctx, - - preserveExpiredValues: GetPreserveExpiredValues(ctx), - } -} - -type preserveExpiredValuesKey struct{} - -// WithPreserveExpiredValues adds a Value to the Context if expired values -// should be preserved, and looked up by a Context wrapped by -// WithSuppressCancel. -// -// WithPreserveExpiredValues must be added as a value to a Context, before that -// Context is wrapped by WithSuppressCancel -func WithPreserveExpiredValues(ctx context.Context, enable bool) context.Context { - return context.WithValue(ctx, preserveExpiredValuesKey{}, enable) -} - -// GetPreserveExpiredValues looks up, and returns the PreserveExpressValues -// value in the context. Returning true if enabled, false otherwise. -func GetPreserveExpiredValues(ctx context.Context) bool { - v := ctx.Value(preserveExpiredValuesKey{}) - if v != nil { - return v.(bool) - } - return false -} diff --git a/vendor/github.com/aws/smithy-go/doc.go b/vendor/github.com/aws/smithy-go/doc.go deleted file mode 100644 index 87b0c74b75..0000000000 --- a/vendor/github.com/aws/smithy-go/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package smithy provides the core components for a Smithy SDK. -package smithy diff --git a/vendor/github.com/aws/smithy-go/document.go b/vendor/github.com/aws/smithy-go/document.go deleted file mode 100644 index dec498c57b..0000000000 --- a/vendor/github.com/aws/smithy-go/document.go +++ /dev/null @@ -1,10 +0,0 @@ -package smithy - -// Document provides access to loosely structured data in a document-like -// format. -// -// Deprecated: See the github.com/aws/smithy-go/document package. -type Document interface { - UnmarshalDocument(interface{}) error - GetValue() (interface{}, error) -} diff --git a/vendor/github.com/aws/smithy-go/document/doc.go b/vendor/github.com/aws/smithy-go/document/doc.go deleted file mode 100644 index 03055b7a1c..0000000000 --- a/vendor/github.com/aws/smithy-go/document/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package document provides interface definitions and error types for document types. -// -// A document is a protocol-agnostic type which supports a JSON-like data-model. You can use this type to send -// UTF-8 strings, arbitrary precision numbers, booleans, nulls, a list of these values, and a map of UTF-8 -// strings to these values. -// -// API Clients expose document constructors in their respective client document packages which must be used to -// Marshal and Unmarshal Go types to and from their respective protocol representations. -// -// See the Marshaler and Unmarshaler type documentation for more details on how to Go types can be converted to and from -// document types. 
-package document diff --git a/vendor/github.com/aws/smithy-go/document/document.go b/vendor/github.com/aws/smithy-go/document/document.go deleted file mode 100644 index 8f852d95c6..0000000000 --- a/vendor/github.com/aws/smithy-go/document/document.go +++ /dev/null @@ -1,153 +0,0 @@ -package document - -import ( - "fmt" - "math/big" - "strconv" -) - -// Marshaler is an interface for a type that marshals a document to its protocol-specific byte representation and -// returns the resulting bytes. A non-nil error will be returned if an error is encountered during marshaling. -// -// Marshal supports basic scalars (int,uint,float,bool,string), big.Int, and big.Float, maps, slices, and structs. -// Anonymous nested types are flattened based on Go anonymous type visibility. -// -// When defining struct types. the `document` struct tag can be used to control how the value will be -// marshaled into the resulting protocol document. -// -// // Field is ignored -// Field int `document:"-"` -// -// // Field object of key "myName" -// Field int `document:"myName"` -// -// // Field object key of key "myName", and -// // Field is omitted if the field is a zero value for the type. -// Field int `document:"myName,omitempty"` -// -// // Field object key of "Field", and -// // Field is omitted if the field is a zero value for the type. -// Field int `document:",omitempty"` -// -// All struct fields, including anonymous fields, are marshaled unless the -// any of the following conditions are meet. -// -// - the field is not exported -// - document field tag is "-" -// - document field tag specifies "omitempty", and is a zero value. -// -// Pointer and interface values are encoded as the value pointed to or -// contained in the interface. A nil value encodes as a null -// value unless `omitempty` struct tag is provided. -// -// Channel, complex, and function values are not encoded and will be skipped -// when walking the value to be marshaled. -// -// time.Time is not supported and will cause the Marshaler to return an error. These values should be represented -// by your application as a string or numerical representation. -// -// Errors that occur when marshaling will stop the marshaler, and return the error. -// -// Marshal cannot represent cyclic data structures and will not handle them. -// Passing cyclic structures to Marshal will result in an infinite recursion. -type Marshaler interface { - MarshalSmithyDocument() ([]byte, error) -} - -// Unmarshaler is an interface for a type that unmarshals a document from its protocol-specific representation, and -// stores the result into the value pointed by v. If v is nil or not a pointer then InvalidUnmarshalError will be -// returned. -// -// Unmarshaler supports the same encodings produced by a document Marshaler. This includes support for the `document` -// struct field tag for controlling how struct fields are unmarshaled. -// -// Both generic interface{} and concrete types are valid unmarshal destination types. When unmarshaling a document -// into an empty interface the Unmarshaler will store one of these values: -// bool, for boolean values -// document.Number, for arbitrary-precision numbers (int64, float64, big.Int, big.Float) -// string, for string values -// []interface{}, for array values -// map[string]interface{}, for objects -// nil, for null values -// -// When unmarshaling, any error that occurs will halt the unmarshal and return the error. 
-type Unmarshaler interface { - UnmarshalSmithyDocument(v interface{}) error -} - -type noSerde interface { - noSmithyDocumentSerde() -} - -// NoSerde is a sentinel value to indicate that a given type should not be marshaled or unmarshaled -// into a protocol document. -type NoSerde struct{} - -func (n NoSerde) noSmithyDocumentSerde() {} - -var _ noSerde = (*NoSerde)(nil) - -// IsNoSerde returns whether the given type implements the no smithy document serde interface. -func IsNoSerde(x interface{}) bool { - _, ok := x.(noSerde) - return ok -} - -// Number is an arbitrary precision numerical value -type Number string - -// String returns the number as a string. -func (n Number) String() string { - return string(n) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return n.intOfBitSize(64) -} - -func (n Number) intOfBitSize(bitSize int) (int64, error) { - return strconv.ParseInt(string(n), 10, bitSize) -} - -// Uint64 returns the number as a uint64. -func (n Number) Uint64() (uint64, error) { - return n.uintOfBitSize(64) -} - -func (n Number) uintOfBitSize(bitSize int) (uint64, error) { - return strconv.ParseUint(string(n), 10, bitSize) -} - -// Float32 returns the number parsed as a 32-bit float; the result is widened to a float64. -func (n Number) Float32() (float64, error) { - return n.floatOfBitSize(32) -} - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return n.floatOfBitSize(64) -} - -// floatOfBitSize parses the number as a float of the given bit size. -func (n Number) floatOfBitSize(bitSize int) (float64, error) { - return strconv.ParseFloat(string(n), bitSize) -} - -// BigFloat attempts to convert the number to a big.Float, returns an error if the operation fails. -func (n Number) BigFloat() (*big.Float, error) { - f, ok := (&big.Float{}).SetString(string(n)) - if !ok { - return nil, fmt.Errorf("failed to convert to big.Float") - } - return f, nil -} - -// BigInt attempts to convert the number to a big.Int, returns an error if the operation fails. -func (n Number) BigInt() (*big.Int, error) { - f, ok := (&big.Int{}).SetString(string(n), 10) - if !ok { - return nil, fmt.Errorf("failed to convert to big.Int") - } - return f, nil -} diff --git a/vendor/github.com/aws/smithy-go/document/errors.go b/vendor/github.com/aws/smithy-go/document/errors.go deleted file mode 100644 index 046a7a7653..0000000000 --- a/vendor/github.com/aws/smithy-go/document/errors.go +++ /dev/null @@ -1,75 +0,0 @@ -package document - -import ( - "fmt" - "reflect" -) - -// UnmarshalTypeError is an error type representing an error -// unmarshaling a Smithy document to a Go value type. This is different -// from UnmarshalError in that it does not wrap an underlying error type. -type UnmarshalTypeError struct { - Value string - Type reflect.Type -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *UnmarshalTypeError) Error() string { - return fmt.Sprintf("unmarshal failed, cannot unmarshal %s into Go value type %s", - e.Value, e.Type.String()) -} - -// An InvalidUnmarshalError is an error type representing an invalid type -// encountered while unmarshaling a Smithy document to a Go value type. -type InvalidUnmarshalError struct { - Type reflect.Type -} - -// Error returns the string representation of the error. -// Satisfying the error interface.
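A minimal sketch of the arbitrary-precision `document.Number` API deleted above: values that overflow int64 are still exact through the big.Int path. Import path and the sample literal (2^128) are assumptions for illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/document"
)

func main() {
	// 2^128: far too large for int64, but exact as a big.Int.
	n := document.Number("340282366920938463463374607431768211456")

	if _, err := n.Int64(); err != nil {
		fmt.Println("int64:", err) // strconv.ParseInt: value out of range
	}
	if i, err := n.BigInt(); err == nil {
		fmt.Println("bits:", i.BitLen()) // bits: 129
	}

	price := document.Number("19.99")
	f, _ := price.Float64()
	fmt.Println(f) // 19.99
}
```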
-func (e *InvalidUnmarshalError) Error() string { - var msg string - if e.Type == nil { - msg = "cannot unmarshal to nil value" - } else if e.Type.Kind() != reflect.Ptr { - msg = fmt.Sprintf("cannot unmarshal to non-pointer value, got %s", e.Type.String()) - } else { - msg = fmt.Sprintf("cannot unmarshal to nil value, %s", e.Type.String()) - } - - return fmt.Sprintf("unmarshal failed, %s", msg) -} - -// An UnmarshalError wraps an error that occurred while unmarshaling a -// Smithy document into a Go type. This is different from -// UnmarshalTypeError in that it wraps the underlying error that occurred. -type UnmarshalError struct { - Err error - Value string - Type reflect.Type -} - -// Unwrap returns the underlying unmarshaling error -func (e *UnmarshalError) Unwrap() error { - return e.Err -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *UnmarshalError) Error() string { - return fmt.Sprintf("unmarshal failed, cannot unmarshal %q into %s, %v", - e.Value, e.Type.String(), e.Err) -} - -// An InvalidMarshalError is an error type representing an error -// occurring when marshaling a Go value type. -type InvalidMarshalError struct { - Message string -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *InvalidMarshalError) Error() string { - return fmt.Sprintf("marshal failed, %s", e.Message) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/doc.go b/vendor/github.com/aws/smithy-go/encoding/doc.go deleted file mode 100644 index 792fdfa08b..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package encoding provides utilities for encoding values for specific -// document encodings. 
- -package encoding diff --git a/vendor/github.com/aws/smithy-go/encoding/encoding.go b/vendor/github.com/aws/smithy-go/encoding/encoding.go deleted file mode 100644 index 2fdfb52250..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/encoding.go +++ /dev/null @@ -1,40 +0,0 @@ -package encoding - -import ( - "fmt" - "math" - "strconv" -) - -// EncodeFloat encodes a float value as per the stdlib encoder for json and xml protocol -// This encodes a float value into dst while attempting to conform to ES6 ToString for Numbers -// -// Based on encoding/json floatEncoder from the Go Standard Library -// https://golang.org/src/encoding/json/encode.go -func EncodeFloat(dst []byte, v float64, bits int) []byte { - if math.IsInf(v, 0) || math.IsNaN(v) { - panic(fmt.Sprintf("invalid float value: %s", strconv.FormatFloat(v, 'g', -1, bits))) - } - - abs := math.Abs(v) - fmt := byte('f') - - if abs != 0 { - if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { - fmt = 'e' - } - } - - dst = strconv.AppendFloat(dst, v, fmt, -1, bits) - - if fmt == 'e' { - // clean up e-09 to e-9 - n := len(dst) - if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' { - dst[n-2] = dst[n-1] - dst = dst[:n-1] - } - } - - return dst -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go deleted file mode 100644 index 543e7cf038..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go +++ /dev/null @@ -1,123 +0,0 @@ -package httpbinding - -import ( - "fmt" - "net/http" - "net/url" - "strconv" - "strings" -) - -const ( - contentLengthHeader = "Content-Length" - floatNaN = "NaN" - floatInfinity = "Infinity" - floatNegInfinity = "-Infinity" -) - -// An Encoder provides encoding of REST URI path, query, and header components -// of an HTTP request. Can also encode a stream as the payload. -// -// Does not support SetFields. -type Encoder struct { - path, rawPath, pathBuffer []byte - - query url.Values - header http.Header -} - -// NewEncoder creates a new encoder from the passed in request. It assumes that -// raw path contains no valuable information at this point, so it passes in path -// as path and raw path for subsequent trans -func NewEncoder(path, query string, headers http.Header) (*Encoder, error) { - return NewEncoderWithRawPath(path, path, query, headers) -} - -// NewHTTPBindingEncoder creates a new encoder from the passed in request. All query and -// header values will be added on top of the request's existing values. Overwriting -// duplicate values. -func NewEncoderWithRawPath(path, rawPath, query string, headers http.Header) (*Encoder, error) { - parseQuery, err := url.ParseQuery(query) - if err != nil { - return nil, fmt.Errorf("failed to parse query string: %w", err) - } - - e := &Encoder{ - path: []byte(path), - rawPath: []byte(rawPath), - query: parseQuery, - header: headers.Clone(), - } - - return e, nil -} - -// Encode returns a REST protocol encoder for encoding HTTP bindings. -// -// Due net/http requiring `Content-Length` to be specified on the http.Request#ContentLength directly. Encode -// will look for whether the header is present, and if so will remove it and set the respective value on http.Request. -// -// Returns any error occurring during encoding. 
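The EncodeFloat helper above mimics ES6 Number-to-string formatting: fixed notation inside roughly [1e-6, 1e21), exponent notation outside it, with `e-0X` exponents shortened to `e-X`. A small sketch of the observable behavior (import path assumed as the canonical upstream path):

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/encoding"
)

func main() {
	fmt.Println(string(encoding.EncodeFloat(nil, 3.5, 64)))  // 3.5
	fmt.Println(string(encoding.EncodeFloat(nil, 1e21, 64))) // 1e+21
	fmt.Println(string(encoding.EncodeFloat(nil, 1e-7, 64))) // 1e-7 (cleaned up from 1e-07)

	// Note: NaN and ±Inf panic here; the HTTP binding encoders instead
	// serialize them as the strings "NaN", "Infinity", and "-Infinity".
}
```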
-func (e *Encoder) Encode(req *http.Request) (*http.Request, error) { - req.URL.Path, req.URL.RawPath = string(e.path), string(e.rawPath) - req.URL.RawQuery = e.query.Encode() - - // net/http ignores Content-Length header and requires it to be set on http.Request - if v := e.header.Get(contentLengthHeader); len(v) > 0 { - iv, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, err - } - req.ContentLength = iv - e.header.Del(contentLengthHeader) - } - - req.Header = e.header - - return req, nil -} - -// AddHeader returns a HeaderValue for appending to the given header name -func (e *Encoder) AddHeader(key string) HeaderValue { - return newHeaderValue(e.header, key, true) -} - -// SetHeader returns a HeaderValue for setting the given header name -func (e *Encoder) SetHeader(key string) HeaderValue { - return newHeaderValue(e.header, key, false) -} - -// Headers returns a Header used for encoding headers with the given prefix -func (e *Encoder) Headers(prefix string) Headers { - return Headers{ - header: e.header, - prefix: strings.TrimSpace(prefix), - } -} - -// HasHeader returns if a header with the key specified exists with one or -// more value. -func (e Encoder) HasHeader(key string) bool { - return len(e.header[key]) != 0 -} - -// SetURI returns a URIValue used for setting the given path key -func (e *Encoder) SetURI(key string) URIValue { - return newURIValue(&e.path, &e.rawPath, &e.pathBuffer, key) -} - -// SetQuery returns a QueryValue used for setting the given query key -func (e *Encoder) SetQuery(key string) QueryValue { - return NewQueryValue(e.query, key, false) -} - -// AddQuery returns a QueryValue used for appending the given query key -func (e *Encoder) AddQuery(key string) QueryValue { - return NewQueryValue(e.query, key, true) -} - -// HasQuery returns if a query with the key specified exists with one or -// more values. 
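Putting the httpbinding Encoder above together, a minimal sketch of encoding a REST request: fill a `{label}` in the path template, add a query parameter, and let Encode move Content-Length onto the request. The import path, bucket path template, and values are assumptions for illustration:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	// Template path with a {label}, empty query string, no seed headers.
	enc, err := httpbinding.NewEncoder("/buckets/{bucketName}", "", http.Header{})
	if err != nil {
		panic(err)
	}

	enc.SetURI("bucketName").String("my bucket") // fills the {bucketName} label
	enc.SetQuery("prefix").String("photos/")
	enc.SetHeader("Content-Length").Long(1024) // moved onto req.ContentLength by Encode

	req, _ := http.NewRequest(http.MethodPut, "https://example.com", nil)
	req, err = enc.Encode(req)

	fmt.Println(req.URL.RawPath, req.URL.RawQuery, req.ContentLength, err)
	// /buckets/my%20bucket prefix=photos%2F 1024 <nil>
}
```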
-func (e *Encoder) HasQuery(key string) bool { - return len(e.query.Get(key)) != 0 -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go deleted file mode 100644 index f9256e175f..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go +++ /dev/null @@ -1,122 +0,0 @@ -package httpbinding - -import ( - "encoding/base64" - "math" - "math/big" - "net/http" - "strconv" - "strings" -) - -// Headers is used to encode header keys using a provided prefix -type Headers struct { - header http.Header - prefix string -} - -// AddHeader returns a HeaderValue used to append values to prefix+key -func (h Headers) AddHeader(key string) HeaderValue { - return h.newHeaderValue(key, true) -} - -// SetHeader returns a HeaderValue used to set the value of prefix+key -func (h Headers) SetHeader(key string) HeaderValue { - return h.newHeaderValue(key, false) -} - -func (h Headers) newHeaderValue(key string, append bool) HeaderValue { - return newHeaderValue(h.header, h.prefix+strings.TrimSpace(key), append) -} - -// HeaderValue is used to encode values to an HTTP header -type HeaderValue struct { - header http.Header - key string - append bool -} - -func newHeaderValue(header http.Header, key string, append bool) HeaderValue { - return HeaderValue{header: header, key: strings.TrimSpace(key), append: append} -} - -func (h HeaderValue) modifyHeader(value string) { - if h.append { - h.header[h.key] = append(h.header[h.key], value) - } else { - h.header[h.key] = append(h.header[h.key][:0], value) - } -} - -// String encodes the value v as the header string value -func (h HeaderValue) String(v string) { - h.modifyHeader(v) -} - -// Byte encodes the value v as a query string value -func (h HeaderValue) Byte(v int8) { - h.Long(int64(v)) -} - -// Short encodes the value v as a query string value -func (h HeaderValue) Short(v int16) { - h.Long(int64(v)) -} - -// Integer encodes the value v as the header string value -func (h HeaderValue) Integer(v int32) { - h.Long(int64(v)) -} - -// Long encodes the value v as the header string value -func (h HeaderValue) Long(v int64) { - h.modifyHeader(strconv.FormatInt(v, 10)) -} - -// Boolean encodes the value v as a query string value -func (h HeaderValue) Boolean(v bool) { - h.modifyHeader(strconv.FormatBool(v)) -} - -// Float encodes the value v as a query string value -func (h HeaderValue) Float(v float32) { - h.float(float64(v), 32) -} - -// Double encodes the value v as a query string value -func (h HeaderValue) Double(v float64) { - h.float(v, 64) -} - -func (h HeaderValue) float(v float64, bitSize int) { - switch { - case math.IsNaN(v): - h.String(floatNaN) - case math.IsInf(v, 1): - h.String(floatInfinity) - case math.IsInf(v, -1): - h.String(floatNegInfinity) - default: - h.modifyHeader(strconv.FormatFloat(v, 'f', -1, bitSize)) - } -} - -// BigInteger encodes the value v as a query string value -func (h HeaderValue) BigInteger(v *big.Int) { - h.modifyHeader(v.String()) -} - -// BigDecimal encodes the value v as a query string value -func (h HeaderValue) BigDecimal(v *big.Float) { - if i, accuracy := v.Int64(); accuracy == big.Exact { - h.Long(i) - return - } - h.modifyHeader(v.Text('e', -1)) -} - -// Blob encodes the value v as a base64 header string value -func (h HeaderValue) Blob(v []byte) { - encodeToString := base64.StdEncoding.EncodeToString(v) - h.modifyHeader(encodeToString) -} diff --git 
a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go deleted file mode 100644 index e78926c9a5..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go +++ /dev/null @@ -1,108 +0,0 @@ -package httpbinding - -import ( - "bytes" - "fmt" -) - -const ( - uriTokenStart = '{' - uriTokenStop = '}' - uriTokenSkip = '+' -) - -func bufCap(b []byte, n int) []byte { - if cap(b) < n { - return make([]byte, 0, n) - } - - return b[0:0] -} - -// replacePathElement replaces a single element in the path []byte. -// Escape is used to control whether the value will be escaped using Amazon path escape style. -func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) { - fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] } - fieldBuf = append(fieldBuf, uriTokenStart) - fieldBuf = append(fieldBuf, key...) - - start := bytes.Index(path, fieldBuf) - end := start + len(fieldBuf) - if start < 0 || len(path[end:]) == 0 { - // TODO what to do about error? - return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path) - } - - encodeSep := true - if path[end] == uriTokenSkip { - // '+' token means do not escape slashes - encodeSep = false - end++ - } - - if escape { - val = EscapePath(val, encodeSep) - } - - if path[end] != uriTokenStop { - return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path) - } - end++ - - fieldBuf = bufCap(fieldBuf, len(val)) - fieldBuf = append(fieldBuf, val...) - - keyLen := end - start - valLen := len(fieldBuf) - - if keyLen == valLen { - copy(path[start:], fieldBuf) - return path, fieldBuf, nil - } - - newLen := len(path) + (valLen - keyLen) - if len(path) < newLen { - path = path[:cap(path)] - } - if cap(path) < newLen { - newURI := make([]byte, newLen) - copy(newURI, path) - path = newURI - } - - // shift - copy(path[start+valLen:], path[end:]) - path = path[:newLen] - copy(path[start:], fieldBuf) - - return path, fieldBuf, nil -} - -// EscapePath escapes part of a URL path in Amazon style. -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -var noEscape [256]bool - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' || - i == '_' || - i == '~' - } -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go deleted file mode 100644 index c2e7d0a20f..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go +++ /dev/null @@ -1,107 +0,0 @@ -package httpbinding - -import ( - "encoding/base64" - "math" - "math/big" - "net/url" - "strconv" -) - -// QueryValue is used to encode query key values -type QueryValue struct { - query url.Values - key string - append bool -} - -// NewQueryValue creates a new QueryValue which enables encoding -// a query value into the given url.Values. 
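The path-replacement code above distinguishes `{label}` from greedy `{label+}` tokens via the `encodeSep` flag, and EscapePath implements the Amazon-style escaping where only unreserved characters (A-Z, a-z, 0-9, `-`, `.`, `_`, `~`) pass through. A small sketch of EscapePath's behavior (import path assumed):

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	fmt.Println(httpbinding.EscapePath("my photo.jpg", true)) // my%20photo.jpg

	// encodeSep controls whether '/' is escaped: greedy {label+} bindings
	// keep slashes, plain {label} bindings escape them.
	fmt.Println(httpbinding.EscapePath("a/b c", false)) // a/b%20c
	fmt.Println(httpbinding.EscapePath("a/b c", true))  // a%2Fb%20c
}
```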
-func NewQueryValue(query url.Values, key string, append bool) QueryValue { - return QueryValue{ - query: query, - key: key, - append: append, - } -} - -func (qv QueryValue) updateKey(value string) { - if qv.append { - qv.query.Add(qv.key, value) - } else { - qv.query.Set(qv.key, value) - } -} - -// Blob encodes v as a base64 query string value -func (qv QueryValue) Blob(v []byte) { - encodeToString := base64.StdEncoding.EncodeToString(v) - qv.updateKey(encodeToString) -} - -// Boolean encodes v as a query string value -func (qv QueryValue) Boolean(v bool) { - qv.updateKey(strconv.FormatBool(v)) -} - -// String encodes v as a query string value -func (qv QueryValue) String(v string) { - qv.updateKey(v) -} - -// Byte encodes v as a query string value -func (qv QueryValue) Byte(v int8) { - qv.Long(int64(v)) -} - -// Short encodes v as a query string value -func (qv QueryValue) Short(v int16) { - qv.Long(int64(v)) -} - -// Integer encodes v as a query string value -func (qv QueryValue) Integer(v int32) { - qv.Long(int64(v)) -} - -// Long encodes v as a query string value -func (qv QueryValue) Long(v int64) { - qv.updateKey(strconv.FormatInt(v, 10)) -} - -// Float encodes v as a query string value -func (qv QueryValue) Float(v float32) { - qv.float(float64(v), 32) -} - -// Double encodes v as a query string value -func (qv QueryValue) Double(v float64) { - qv.float(v, 64) -} - -func (qv QueryValue) float(v float64, bitSize int) { - switch { - case math.IsNaN(v): - qv.String(floatNaN) - case math.IsInf(v, 1): - qv.String(floatInfinity) - case math.IsInf(v, -1): - qv.String(floatNegInfinity) - default: - qv.updateKey(strconv.FormatFloat(v, 'f', -1, bitSize)) - } -} - -// BigInteger encodes v as a query string value -func (qv QueryValue) BigInteger(v *big.Int) { - qv.updateKey(v.String()) -} - -// BigDecimal encodes v as a query string value -func (qv QueryValue) BigDecimal(v *big.Float) { - if i, accuracy := v.Int64(); accuracy == big.Exact { - qv.Long(i) - return - } - qv.updateKey(v.Text('e', -1)) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go deleted file mode 100644 index f04e11984a..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go +++ /dev/null @@ -1,111 +0,0 @@ -package httpbinding - -import ( - "math" - "math/big" - "strconv" - "strings" -) - -// URIValue is used to encode named URI parameters -type URIValue struct { - path, rawPath, buffer *[]byte - - key string -} - -func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIValue { - return URIValue{path: path, rawPath: rawPath, buffer: buffer, key: key} -} - -func (u URIValue) modifyURI(value string) (err error) { - *u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false) - if err != nil { - return err - } - *u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true) - return err -} - -// Boolean encodes v as a URI string value -func (u URIValue) Boolean(v bool) error { - return u.modifyURI(strconv.FormatBool(v)) -} - -// String encodes v as a URI string value -func (u URIValue) String(v string) error { - return u.modifyURI(v) -} - -// Byte encodes v as a URI string value -func (u URIValue) Byte(v int8) error { - return u.Long(int64(v)) -} - -// Short encodes v as a URI string value -func (u URIValue) Short(v int16) error { - return u.Long(int64(v)) -} - -// Integer encodes v as a URI string value -func (u URIValue) 
Integer(v int32) error { - return u.Long(int64(v)) -} - -// Long encodes v as a URI string value -func (u URIValue) Long(v int64) error { - return u.modifyURI(strconv.FormatInt(v, 10)) -} - -// Float encodes v as a query string value -func (u URIValue) Float(v float32) error { - return u.float(float64(v), 32) -} - -// Double encodes v as a query string value -func (u URIValue) Double(v float64) error { - return u.float(v, 64) -} - -func (u URIValue) float(v float64, bitSize int) error { - switch { - case math.IsNaN(v): - return u.String(floatNaN) - case math.IsInf(v, 1): - return u.String(floatInfinity) - case math.IsInf(v, -1): - return u.String(floatNegInfinity) - default: - return u.modifyURI(strconv.FormatFloat(v, 'f', -1, bitSize)) - } -} - -// BigInteger encodes v as a query string value -func (u URIValue) BigInteger(v *big.Int) error { - return u.modifyURI(v.String()) -} - -// BigDecimal encodes v as a query string value -func (u URIValue) BigDecimal(v *big.Float) error { - if i, accuracy := v.Int64(); accuracy == big.Exact { - return u.Long(i) - } - return u.modifyURI(v.Text('e', -1)) -} - -// SplitURI parses a Smithy HTTP binding trait URI -func SplitURI(uri string) (path, query string) { - queryStart := strings.IndexRune(uri, '?') - if queryStart == -1 { - path = uri - return path, query - } - - path = uri[:queryStart] - if queryStart+1 >= len(uri) { - return path, query - } - query = uri[queryStart+1:] - - return path, query -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/array.go b/vendor/github.com/aws/smithy-go/encoding/json/array.go deleted file mode 100644 index 7a232f660f..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/array.go +++ /dev/null @@ -1,35 +0,0 @@ -package json - -import ( - "bytes" -) - -// Array represents the encoding of a JSON Array -type Array struct { - w *bytes.Buffer - writeComma bool - scratch *[]byte -} - -func newArray(w *bytes.Buffer, scratch *[]byte) *Array { - w.WriteRune(leftBracket) - return &Array{w: w, scratch: scratch} -} - -// Value adds a new element to the JSON Array. -// Returns a Value type that is used to encode -// the array element. -func (a *Array) Value() Value { - if a.writeComma { - a.w.WriteRune(comma) - } else { - a.writeComma = true - } - - return newValue(a.w, a.scratch) -} - -// Close encodes the end of the JSON Array -func (a *Array) Close() { - a.w.WriteRune(rightBracket) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/constants.go b/vendor/github.com/aws/smithy-go/encoding/json/constants.go deleted file mode 100644 index 91044092ae..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/constants.go +++ /dev/null @@ -1,15 +0,0 @@ -package json - -const ( - leftBrace = '{' - rightBrace = '}' - - leftBracket = '[' - rightBracket = ']' - - comma = ',' - quote = '"' - colon = ':' - - null = "null" -) diff --git a/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go deleted file mode 100644 index 7050c85b3c..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go +++ /dev/null @@ -1,139 +0,0 @@ -package json - -import ( - "bytes" - "encoding/json" - "fmt" - "io" -) - -// DiscardUnknownField discards unknown fields from a decoder body. -// This function is useful while deserializing a JSON body with additional -// unknown information that should be discarded. 
-func DiscardUnknownField(decoder *json.Decoder) error { - // This deliberately does not share logic with CollectUnknownField, even - // though it could, because if we were to delegate to that then we'd incur - // extra allocations and general memory usage. - v, err := decoder.Token() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - if _, ok := v.(json.Delim); ok { - for decoder.More() { - err = DiscardUnknownField(decoder) - } - endToken, err := decoder.Token() - if err != nil { - return err - } - if _, ok := endToken.(json.Delim); !ok { - return fmt.Errorf("invalid JSON : expected json delimiter, found %T %v", - endToken, endToken) - } - } - - return nil -} - -// CollectUnknownField grabs the contents of unknown fields from the decoder body -// and returns them as a byte slice. This is useful for skipping unknown fields without -// completely discarding them. -func CollectUnknownField(decoder *json.Decoder) ([]byte, error) { - result, err := collectUnknownField(decoder) - if err != nil { - return nil, err - } - - buff := bytes.NewBuffer(nil) - encoder := json.NewEncoder(buff) - - if err := encoder.Encode(result); err != nil { - return nil, err - } - - return buff.Bytes(), nil -} - -func collectUnknownField(decoder *json.Decoder) (interface{}, error) { - // Grab the initial value. This could either be a concrete value like a string or a a - // delimiter. - token, err := decoder.Token() - if err == io.EOF { - return nil, nil - } - if err != nil { - return nil, err - } - - // If it's an array or object, we'll need to recurse. - delim, ok := token.(json.Delim) - if ok { - var result interface{} - if delim == '{' { - result, err = collectUnknownObject(decoder) - if err != nil { - return nil, err - } - } else { - result, err = collectUnknownArray(decoder) - if err != nil { - return nil, err - } - } - - // Discard the closing token. decoder.Token handles checking for matching delimiters - if _, err := decoder.Token(); err != nil { - return nil, err - } - return result, nil - } - - return token, nil -} - -func collectUnknownArray(decoder *json.Decoder) ([]interface{}, error) { - // We need to create an empty array here instead of a nil array, since by getting - // into this function at all we necessarily have seen a non-nil list. - array := []interface{}{} - - for decoder.More() { - value, err := collectUnknownField(decoder) - if err != nil { - return nil, err - } - array = append(array, value) - } - - return array, nil -} - -func collectUnknownObject(decoder *json.Decoder) (map[string]interface{}, error) { - object := make(map[string]interface{}) - - for decoder.More() { - key, err := collectUnknownField(decoder) - if err != nil { - return nil, err - } - - // Keys have to be strings, which is particularly important as the encoder - // won't except a map with interface{} keys - stringKey, ok := key.(string) - if !ok { - return nil, fmt.Errorf("expected string key, found %T", key) - } - - value, err := collectUnknownField(decoder) - if err != nil { - return nil, err - } - - object[stringKey] = value - } - - return object, nil -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/encoder.go b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go deleted file mode 100644 index 8772953f1e..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/encoder.go +++ /dev/null @@ -1,30 +0,0 @@ -package json - -import ( - "bytes" -) - -// Encoder is JSON encoder that supports construction of JSON values -// using methods. 
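A minimal sketch of the unknown-field helpers above: while hand-rolling a streaming decode, unrecognized keys can be preserved as raw JSON via CollectUnknownField instead of being dropped. The import alias, payload, and key names are illustrative assumptions:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	smithyjson "github.com/aws/smithy-go/encoding/json"
)

func main() {
	body := []byte(`{"known": 1, "extra": {"a": [1, 2], "b": "x"}}`)
	dec := json.NewDecoder(bytes.NewReader(body))

	dec.Token() // consume the opening '{'
	for dec.More() {
		key, _ := dec.Token()
		switch key {
		case "known":
			var v int
			dec.Decode(&v)
			fmt.Println("known =", v)
		default:
			// Re-encode the unrecognized value instead of dropping it.
			raw, err := smithyjson.CollectUnknownField(dec)
			fmt.Printf("%v = %s (err: %v)", key, raw, err)
		}
	}
}
```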
-type Encoder struct { - w *bytes.Buffer - Value -} - -// NewEncoder returns a new JSON encoder -func NewEncoder() *Encoder { - writer := bytes.NewBuffer(nil) - scratch := make([]byte, 64) - - return &Encoder{w: writer, Value: newValue(writer, &scratch)} -} - -// String returns the String output of the JSON encoder -func (e Encoder) String() string { - return e.w.String() -} - -// Bytes returns the []byte slice of the JSON encoder -func (e Encoder) Bytes() []byte { - return e.w.Bytes() -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/escape.go b/vendor/github.com/aws/smithy-go/encoding/json/escape.go deleted file mode 100644 index d984d0cdca..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/escape.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copied and modified from Go 1.8 stdlib's encoding/json/#safeSet - -package json - -import ( - "bytes" - "unicode/utf8" -) - -// safeSet holds the value true if the ASCII character with the given array -// position can be represented inside a JSON string without any further -// escaping. -// -// All values are true except for the ASCII control characters (0-31), the -// double quote ("), and the backslash character ("\"). -var safeSet = [utf8.RuneSelf]bool{ - ' ': true, - '!': true, - '"': false, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '(': true, - ')': true, - '*': true, - '+': true, - ',': true, - '-': true, - '.': true, - '/': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - ':': true, - ';': true, - '<': true, - '=': true, - '>': true, - '?': true, - '@': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'V': true, - 'W': true, - 'X': true, - 'Y': true, - 'Z': true, - '[': true, - '\\': false, - ']': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '{': true, - '|': true, - '}': true, - '~': true, - '\u007f': true, -} - -// copied from Go 1.8 stdlib's encoding/json/#hex -var hex = "0123456789abcdef" - -// escapeStringBytes escapes and writes the passed in string bytes to the dst -// buffer -// -// Copied and modifed from Go 1.8 stdlib's encodeing/json/#encodeState.stringBytes -func escapeStringBytes(e *bytes.Buffer, s []byte) { - e.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if safeSet[b] { - i++ - continue - } - if start < i { - e.Write(s[start:i]) - } - switch b { - case '\\', '"': - e.WriteByte('\\') - e.WriteByte(b) - case '\n': - e.WriteByte('\\') - e.WriteByte('n') - case '\r': - e.WriteByte('\\') - e.WriteByte('r') - case '\t': - e.WriteByte('\\') - e.WriteByte('t') - default: - // This encodes bytes < 0x20 except for \t, \n and \r. 
- // If escapeHTML is set, it also escapes <, >, and & - // because they can lead to security holes when - // user-controlled strings are rendered into JSON - // and served to some browsers. - e.WriteString(`\u00`) - e.WriteByte(hex[b>>4]) - e.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - e.Write(s[start:i]) - } - e.WriteString(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. - // U+2029 is PARAGRAPH SEPARATOR. - // They are both technically valid characters in JSON strings, - // but don't work in JSONP, which has to be evaluated as JavaScript, - // and can lead to security holes there. It is valid JSON to - // escape them, so we do so unconditionally. - // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. - if c == '\u2028' || c == '\u2029' { - if start < i { - e.Write(s[start:i]) - } - e.WriteString(`\u202`) - e.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - e.Write(s[start:]) - } - e.WriteByte('"') -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/object.go b/vendor/github.com/aws/smithy-go/encoding/json/object.go deleted file mode 100644 index 722346d035..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/object.go +++ /dev/null @@ -1,40 +0,0 @@ -package json - -import ( - "bytes" -) - -// Object represents the encoding of a JSON Object type -type Object struct { - w *bytes.Buffer - writeComma bool - scratch *[]byte -} - -func newObject(w *bytes.Buffer, scratch *[]byte) *Object { - w.WriteRune(leftBrace) - return &Object{w: w, scratch: scratch} -} - -func (o *Object) writeKey(key string) { - escapeStringBytes(o.w, []byte(key)) - o.w.WriteRune(colon) -} - -// Key adds the given named key to the JSON object. -// Returns a Value encoder that should be used to encode -// a JSON value type. 
-func (o *Object) Key(name string) Value { - if o.writeComma { - o.w.WriteRune(comma) - } else { - o.writeComma = true - } - o.writeKey(name) - return newValue(o.w, o.scratch) -} - -// Close encodes the end of the JSON Object -func (o *Object) Close() { - o.w.WriteRune(rightBrace) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/value.go b/vendor/github.com/aws/smithy-go/encoding/json/value.go deleted file mode 100644 index b41ff1e15c..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/value.go +++ /dev/null @@ -1,149 +0,0 @@ -package json - -import ( - "bytes" - "encoding/base64" - "math/big" - "strconv" - - "github.com/aws/smithy-go/encoding" -) - -// Value represents a JSON Value type -// JSON Value types: Object, Array, String, Number, Boolean, and Null -type Value struct { - w *bytes.Buffer - scratch *[]byte -} - -// newValue returns a new Value encoder -func newValue(w *bytes.Buffer, scratch *[]byte) Value { - return Value{w: w, scratch: scratch} -} - -// String encodes v as a JSON string -func (jv Value) String(v string) { - escapeStringBytes(jv.w, []byte(v)) -} - -// Byte encodes v as a JSON number -func (jv Value) Byte(v int8) { - jv.Long(int64(v)) -} - -// Short encodes v as a JSON number -func (jv Value) Short(v int16) { - jv.Long(int64(v)) -} - -// Integer encodes v as a JSON number -func (jv Value) Integer(v int32) { - jv.Long(int64(v)) -} - -// Long encodes v as a JSON number -func (jv Value) Long(v int64) { - *jv.scratch = strconv.AppendInt((*jv.scratch)[:0], v, 10) - jv.w.Write(*jv.scratch) -} - -// ULong encodes v as a JSON number -func (jv Value) ULong(v uint64) { - *jv.scratch = strconv.AppendUint((*jv.scratch)[:0], v, 10) - jv.w.Write(*jv.scratch) -} - -// Float encodes v as a JSON number -func (jv Value) Float(v float32) { - jv.float(float64(v), 32) -} - -// Double encodes v as a JSON number -func (jv Value) Double(v float64) { - jv.float(v, 64) -} - -func (jv Value) float(v float64, bits int) { - *jv.scratch = encoding.EncodeFloat((*jv.scratch)[:0], v, bits) - jv.w.Write(*jv.scratch) -} - -// Boolean encodes v as a JSON boolean -func (jv Value) Boolean(v bool) { - *jv.scratch = strconv.AppendBool((*jv.scratch)[:0], v) - jv.w.Write(*jv.scratch) -} - -// Base64EncodeBytes writes v as a base64 value in JSON string -func (jv Value) Base64EncodeBytes(v []byte) { - encodeByteSlice(jv.w, (*jv.scratch)[:0], v) -} - -// Write writes v directly to the JSON document -func (jv Value) Write(v []byte) { - jv.w.Write(v) -} - -// Array returns a new Array encoder -func (jv Value) Array() *Array { - return newArray(jv.w, jv.scratch) -} - -// Object returns a new Object encoder -func (jv Value) Object() *Object { - return newObject(jv.w, jv.scratch) -} - -// Null encodes a null JSON value -func (jv Value) Null() { - jv.w.WriteString(null) -} - -// BigInteger encodes v as JSON value -func (jv Value) BigInteger(v *big.Int) { - jv.w.Write([]byte(v.Text(10))) -} - -// BigDecimal encodes v as JSON value -func (jv Value) BigDecimal(v *big.Float) { - if i, accuracy := v.Int64(); accuracy == big.Exact { - jv.Long(i) - return - } - // TODO: Should this try to match ES6 ToString similar to stdlib JSON? 
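A minimal sketch of building a JSON document with the Encoder, Object, Array, and Value types above. Nothing here validates structure, so each Object/Array must be explicitly closed; the field names are illustrative:

```go
package main

import (
	"fmt"

	smithyjson "github.com/aws/smithy-go/encoding/json"
)

func main() {
	enc := smithyjson.NewEncoder()

	obj := enc.Object() // writes '{'
	obj.Key("name").String("ko")

	tags := obj.Key("tags").Array() // writes '['
	tags.Value().String("build")
	tags.Value().String("publish")
	tags.Close() // writes ']'

	obj.Key("count").Integer(2)
	obj.Close() // writes '}'

	fmt.Println(enc.String())
	// {"name":"ko","tags":["build","publish"],"count":2}
}
```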
- jv.w.Write([]byte(v.Text('e', -1))) -} - -// Based on encoding/json encodeByteSlice from the Go Standard Library -// https://golang.org/src/encoding/json/encode.go -func encodeByteSlice(w *bytes.Buffer, scratch []byte, v []byte) { - if v == nil { - w.WriteString(null) - return - } - - w.WriteRune(quote) - - encodedLen := base64.StdEncoding.EncodedLen(len(v)) - if encodedLen <= len(scratch) { - // If the encoded bytes fit in e.scratch, avoid an extra - // allocation and use the cheaper Encoding.Encode. - dst := scratch[:encodedLen] - base64.StdEncoding.Encode(dst, v) - w.Write(dst) - } else if encodedLen <= 1024 { - // The encoded bytes are short enough to allocate for, and - // Encoding.Encode is still cheaper. - dst := make([]byte, encodedLen) - base64.StdEncoding.Encode(dst, v) - w.Write(dst) - } else { - // The encoded bytes are too long to cheaply allocate, and - // Encoding.Encode is no longer noticeably cheaper. - enc := base64.NewEncoder(base64.StdEncoding, w) - enc.Write(v) - enc.Close() - } - - w.WriteRune(quote) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/array.go b/vendor/github.com/aws/smithy-go/encoding/xml/array.go deleted file mode 100644 index 508f3c997e..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/array.go +++ /dev/null @@ -1,49 +0,0 @@ -package xml - -// arrayMemberWrapper is the default member wrapper tag name for XML Array type -var arrayMemberWrapper = StartElement{ - Name: Name{Local: "member"}, -} - -// Array represents the encoding of a XML array type -type Array struct { - w writer - scratch *[]byte - - // member start element is the array member wrapper start element - memberStartElement StartElement - - // isFlattened indicates if the array is a flattened array. - isFlattened bool -} - -// newArray returns an array encoder. -// It also takes in the member start element, array start element. -// It takes in an isFlattened bool, indicating whether the array is a flattened array. -// -// A wrapped array ["value1", "value2"] is represented as -// `<List><member>value1</member><member>value2</member></List>`. - -// A flattened array `someList: ["value1", "value2"]` is represented as -// `<someList>value1</someList><someList>value2</someList>`. -func newArray(w writer, scratch *[]byte, memberStartElement StartElement, arrayStartElement StartElement, isFlattened bool) *Array { - var memberWrapper = memberStartElement - if isFlattened { - memberWrapper = arrayStartElement - } - - return &Array{ - w: w, - scratch: scratch, - memberStartElement: memberWrapper, - isFlattened: isFlattened, - } -} - -// Member adds a new member to the XML array. -// It returns a Value encoder. -func (a *Array) Member() Value { - v := newValue(a.w, a.scratch, a.memberStartElement) - v.isFlattened = a.isFlattened - return v -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go deleted file mode 100644 index ccee90a636..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go +++ /dev/null @@ -1,10 +0,0 @@ -package xml - -const ( - leftAngleBracket = '<' - rightAngleBracket = '>' - forwardSlash = '/' - colon = ':' - equals = '=' - quote = '"' -) diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go deleted file mode 100644 index f9200093e8..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Package xml holds the XML encoder utility. This utility is written in accordance with our design to delegate to -shape serializer function in which an xml.Value will be passed around. - -Resources followed: https://smithy.io/2.0/spec/protocol-traits.html#xml-bindings - -Member Element - -Member element should be used to encode xml shapes into xml elements except for flattened xml shapes. Member elements -write their own element start tag. These elements should always be closed. - -Flattened Element - -Flattened element should be used to encode shapes marked with flattened trait into xml elements. Flattened elements -do not write a start tag, and thus should not be closed. - -Simple types encoding - -All simple type methods on value such as String(), Long() etc. auto close the associated member element. - -Array - -Array returns the collection encoder. It has two modes, wrapped and flattened encoding. - -Wrapped arrays have two methods Array() and ArrayWithCustomName() which facilitate array member wrapping. -By default, wrapped array members are wrapped with a `member` named start element. - - <wrappedArray><member>apple</member><member>tree</member></wrappedArray> - -Flattened arrays rely on Value being marked as flattened. -If a shape is marked as flattened, Array() will use the shape element name as wrapper for array elements. - - <flattenedArray>apple</flattenedArray><flattenedArray>tree</flattenedArray> - -Map - -Map is the map encoder. It has two modes, wrapped and flattened encoding. - -Wrapped map has the Entry() method, which facilitates map member wrapping. -By default, wrapped map members are wrapped with an `entry` named start element. - - <wrappedMap><entry><key>apple</key><value>tree</value></entry><entry><key>snow</key><value>ice</value></entry></wrappedMap> - -Flattened maps rely on Value being marked as flattened. -If a shape is marked as flattened, Map() will use the shape element name as wrapper for map entry elements. - - <flattenedMap><key>apple</key><value>tree</value></flattenedMap><flattenedMap><key>snow</key><value>ice</value></flattenedMap> -*/ -package xml diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/element.go b/vendor/github.com/aws/smithy-go/encoding/xml/element.go deleted file mode 100644 index ae84e7999e..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/element.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copied and modified from Go 1.14 stdlib's encoding/xml - -package xml - -// A Name represents an XML name (Local) annotated -// with a name space identifier (Space). -// In tokens returned by Decoder.Token, the Space identifier -// is given as a canonical URL, not the short prefix used -// in the document being parsed. -type Name struct { - Space, Local string -} - -// An Attr represents an attribute in an XML element (Name=Value). -type Attr struct { - Name Name - Value string -} - -/* -NewAttribute returns an attribute. -It takes in a local name aka attribute name, and value -representing the attribute value. -*/ -func NewAttribute(local, value string) Attr { - return Attr{ - Name: Name{ - Local: local, - }, - Value: value, - } -} - -/* -NewNamespaceAttribute returns an attribute. -It takes in a local name aka attribute name, and value -representing the attribute value. - -NewNamespaceAttribute appends `xmlns:` in front of namespace -prefix.
- -For creating a name space attribute representing -`xmlns:prefix="http://example.com`, the breakdown would be: -local = "prefix" -value = "http://example.com" -*/ -func NewNamespaceAttribute(local, value string) Attr { - attr := NewAttribute(local, value) - - // default name space identifier - attr.Name.Space = "xmlns" - return attr -} - -// A StartElement represents an XML start element. -type StartElement struct { - Name Name - Attr []Attr -} - -// Copy creates a new copy of StartElement. -func (e StartElement) Copy() StartElement { - attrs := make([]Attr, len(e.Attr)) - copy(attrs, e.Attr) - e.Attr = attrs - return e -} - -// End returns the corresponding XML end element. -func (e StartElement) End() EndElement { - return EndElement{e.Name} -} - -// returns true if start element local name is empty -func (e StartElement) isZero() bool { - return len(e.Name.Local) == 0 -} - -// An EndElement represents an XML end element. -type EndElement struct { - Name Name -} - -// returns true if end element local name is empty -func (e EndElement) isZero() bool { - return len(e.Name.Local) == 0 -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go deleted file mode 100644 index 16fb3dddb0..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go +++ /dev/null @@ -1,51 +0,0 @@ -package xml - -// writer interface used by the xml encoder to write an encoded xml -// document in a writer. -type writer interface { - - // Write takes in a byte slice and returns number of bytes written and error - Write(p []byte) (n int, err error) - - // WriteRune takes in a rune and returns number of bytes written and error - WriteRune(r rune) (n int, err error) - - // WriteString takes in a string and returns number of bytes written and error - WriteString(s string) (n int, err error) - - // String method returns a string - String() string - - // Bytes return a byte slice. - Bytes() []byte -} - -// Encoder is an XML encoder that supports construction of XML values -// using methods. The encoder takes in a writer and maintains a scratch buffer. -type Encoder struct { - w writer - scratch *[]byte -} - -// NewEncoder returns an XML encoder -func NewEncoder(w writer) *Encoder { - scratch := make([]byte, 64) - - return &Encoder{w: w, scratch: &scratch} -} - -// String returns the string output of the XML encoder -func (e Encoder) String() string { - return e.w.String() -} - -// Bytes returns the []byte slice of the XML encoder -func (e Encoder) Bytes() []byte { - return e.w.Bytes() -} - -// RootElement builds a root element encoding -// It writes it's start element tag. The value should be closed. 
-func (e Encoder) RootElement(element StartElement) Value { - return newValue(e.w, e.scratch, element) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go deleted file mode 100644 index f3db6ccca8..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go +++ /dev/null @@ -1,51 +0,0 @@ -package xml - -import ( - "encoding/xml" - "fmt" - "io" -) - -// ErrorComponents represents the error response fields -// that will be deserialized from an xml error response body -type ErrorComponents struct { - Code string - Message string -} - -// GetErrorResponseComponents returns the error fields from an xml error response body -func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) { - if noErrorWrapping { - var errResponse noWrappedErrorResponse - if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) - } - return ErrorComponents{ - Code: errResponse.Code, - Message: errResponse.Message, - }, nil - } - - var errResponse wrappedErrorResponse - if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) - } - return ErrorComponents{ - Code: errResponse.Code, - Message: errResponse.Message, - }, nil -} - -// noWrappedErrorResponse represents the error response body with -// no internal <Error></Error> wrapping -type noWrappedErrorResponse struct { - Code string `xml:"Code"` - Message string `xml:"Message"` -} - -// wrappedErrorResponse represents the error response body with -// internal <Error></Error> wrapping -type wrappedErrorResponse struct { - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/escape.go b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go deleted file mode 100644 index 1c5479af67..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/escape.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copied and modified from Go 1.14 stdlib's encoding/xml - -package xml - -import ( - "unicode/utf8" -) - -// Copied from Go 1.14 stdlib's encoding/xml -var ( - escQuot = []byte("&#34;") // shorter than "&quot;" - escApos = []byte("&#39;") // shorter than "&apos;" - escAmp = []byte("&amp;") - escLT = []byte("&lt;") - escGT = []byte("&gt;") - escTab = []byte("&#x9;") - escNL = []byte("&#xA;") - escCR = []byte("&#xD;") - escFFFD = []byte("\uFFFD") // Unicode replacement character - - // Additional Escapes - escNextLine = []byte("&#x85;") - escLS = []byte("&#x2028;")
") -) - -// Decide whether the given rune is in the XML Character Range, per -// the Char production of https://www.xml.com/axml/testaxml.htm, -// Section 2.2 Characters. -func isInCharacterRange(r rune) (inrange bool) { - return r == 0x09 || - r == 0x0A || - r == 0x0D || - r >= 0x20 && r <= 0xD7FF || - r >= 0xE000 && r <= 0xFFFD || - r >= 0x10000 && r <= 0x10FFFF -} - -// TODO: When do we need to escape the string? -// Based on encoding/xml escapeString from the Go Standard Library. -// https://golang.org/src/encoding/xml/xml.go -func escapeString(e writer, s string) { - var esc []byte - last := 0 - for i := 0; i < len(s); { - r, width := utf8.DecodeRuneInString(s[i:]) - i += width - switch r { - case '"': - esc = escQuot - case '\'': - esc = escApos - case '&': - esc = escAmp - case '<': - esc = escLT - case '>': - esc = escGT - case '\t': - esc = escTab - case '\n': - esc = escNL - case '\r': - esc = escCR - case '\u0085': - // Not escaped by stdlib - esc = escNextLine - case '\u2028': - // Not escaped by stdlib - esc = escLS - default: - if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { - esc = escFFFD - break - } - continue - } - e.WriteString(s[last : i-width]) - e.Write(esc) - last = i - } - e.WriteString(s[last:]) -} - -// escapeText writes to w the properly escaped XML equivalent -// of the plain text data s. If escapeNewline is true, newline -// characters will be escaped. -// -// Based on encoding/xml escapeText from the Go Standard Library. -// https://golang.org/src/encoding/xml/xml.go -func escapeText(e writer, s []byte) { - var esc []byte - last := 0 - for i := 0; i < len(s); { - r, width := utf8.DecodeRune(s[i:]) - i += width - switch r { - case '"': - esc = escQuot - case '\'': - esc = escApos - case '&': - esc = escAmp - case '<': - esc = escLT - case '>': - esc = escGT - case '\t': - esc = escTab - case '\n': - // This always escapes newline, which is different than stdlib's optional - // escape of new line. - esc = escNL - case '\r': - esc = escCR - case '\u0085': - // Not escaped by stdlib - esc = escNextLine - case '\u2028': - // Not escaped by stdlib - esc = escLS - default: - if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { - esc = escFFFD - break - } - continue - } - e.Write(s[last : i-width]) - e.Write(esc) - last = i - } - e.Write(s[last:]) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/map.go b/vendor/github.com/aws/smithy-go/encoding/xml/map.go deleted file mode 100644 index e42858965c..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/map.go +++ /dev/null @@ -1,53 +0,0 @@ -package xml - -// mapEntryWrapper is the default member wrapper start element for XML Map entry -var mapEntryWrapper = StartElement{ - Name: Name{Local: "entry"}, -} - -// Map represents the encoding of a XML map type -type Map struct { - w writer - scratch *[]byte - - // member start element is the map entry wrapper start element - memberStartElement StartElement - - // isFlattened returns true if the map is a flattened map - isFlattened bool -} - -// newMap returns a map encoder which sets the default map -// entry wrapper to `entry`. -// -// A map `someMap : {{key:"abc", value:"123"}}` is represented as -// `abc123`. -func newMap(w writer, scratch *[]byte) *Map { - return &Map{ - w: w, - scratch: scratch, - memberStartElement: mapEntryWrapper, - } -} - -// newFlattenedMap returns a map encoder which sets the map -// entry wrapper to the passed in memberWrapper`. 
-// -// A flattened map `someMap : {{key:"abc", value:"123"}}` is represented as -// `abc123`. -func newFlattenedMap(w writer, scratch *[]byte, memberWrapper StartElement) *Map { - return &Map{ - w: w, - scratch: scratch, - memberStartElement: memberWrapper, - isFlattened: true, - } -} - -// Entry returns a Value encoder with map's element. -// It writes the member wrapper start tag for each entry. -func (m *Map) Entry() Value { - v := newValue(m.w, m.scratch, m.memberStartElement) - v.isFlattened = m.isFlattened - return v -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/value.go b/vendor/github.com/aws/smithy-go/encoding/xml/value.go deleted file mode 100644 index 09434b2c0b..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/value.go +++ /dev/null @@ -1,302 +0,0 @@ -package xml - -import ( - "encoding/base64" - "fmt" - "math/big" - "strconv" - - "github.com/aws/smithy-go/encoding" -) - -// Value represents an XML Value type -// XML Value types: Object, Array, Map, String, Number, Boolean. -type Value struct { - w writer - scratch *[]byte - - // xml start element is the associated start element for the Value - startElement StartElement - - // indicates if the Value represents a flattened shape - isFlattened bool -} - -// newFlattenedValue returns a Value encoder. newFlattenedValue does NOT write the start element tag -func newFlattenedValue(w writer, scratch *[]byte, startElement StartElement) Value { - return Value{ - w: w, - scratch: scratch, - startElement: startElement, - } -} - -// newValue writes the start element xml tag and returns a Value -func newValue(w writer, scratch *[]byte, startElement StartElement) Value { - writeStartElement(w, startElement) - return Value{w: w, scratch: scratch, startElement: startElement} -} - -// writeStartElement takes in a start element and writes it. -// It handles namespace, attributes in start element. -func writeStartElement(w writer, el StartElement) error { - if el.isZero() { - return fmt.Errorf("xml start element cannot be nil") - } - - w.WriteRune(leftAngleBracket) - - if len(el.Name.Space) != 0 { - escapeString(w, el.Name.Space) - w.WriteRune(colon) - } - escapeString(w, el.Name.Local) - for _, attr := range el.Attr { - w.WriteRune(' ') - writeAttribute(w, &attr) - } - - w.WriteRune(rightAngleBracket) - return nil -} - -// writeAttribute writes an attribute from a provided Attribute -// For a namespace attribute, the attr.Name.Space must be defined as "xmlns". -// https://www.w3.org/TR/REC-xml-names/#NT-DefaultAttName -func writeAttribute(w writer, attr *Attr) { - // if local, space both are not empty - if len(attr.Name.Space) != 0 && len(attr.Name.Local) != 0 { - escapeString(w, attr.Name.Space) - w.WriteRune(colon) - } - - // if prefix is empty, the default `xmlns` space should be used as prefix. - if len(attr.Name.Local) == 0 { - attr.Name.Local = attr.Name.Space - } - - escapeString(w, attr.Name.Local) - w.WriteRune(equals) - w.WriteRune(quote) - escapeString(w, attr.Value) - w.WriteRune(quote) -} - -// writeEndElement takes in a end element and writes it. -func writeEndElement(w writer, el EndElement) error { - if el.isZero() { - return fmt.Errorf("xml end element cannot be nil") - } - - w.WriteRune(leftAngleBracket) - w.WriteRune(forwardSlash) - - if len(el.Name.Space) != 0 { - escapeString(w, el.Name.Space) - w.WriteRune(colon) - } - escapeString(w, el.Name.Local) - w.WriteRune(rightAngleBracket) - - return nil -} - -// String encodes v as a XML string. 
-// It will auto close the parent xml element tag. -func (xv Value) String(v string) { - escapeString(xv.w, v) - xv.Close() -} - -// Byte encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Byte(v int8) { - xv.Long(int64(v)) -} - -// Short encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Short(v int16) { - xv.Long(int64(v)) -} - -// Integer encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Integer(v int32) { - xv.Long(int64(v)) -} - -// Long encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Long(v int64) { - *xv.scratch = strconv.AppendInt((*xv.scratch)[:0], v, 10) - xv.w.Write(*xv.scratch) - - xv.Close() -} - -// Float encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Float(v float32) { - xv.float(float64(v), 32) - xv.Close() -} - -// Double encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Double(v float64) { - xv.float(v, 64) - xv.Close() -} - -func (xv Value) float(v float64, bits int) { - *xv.scratch = encoding.EncodeFloat((*xv.scratch)[:0], v, bits) - xv.w.Write(*xv.scratch) -} - -// Boolean encodes v as a XML boolean. -// It will auto close the parent xml element tag. -func (xv Value) Boolean(v bool) { - *xv.scratch = strconv.AppendBool((*xv.scratch)[:0], v) - xv.w.Write(*xv.scratch) - - xv.Close() -} - -// Base64EncodeBytes writes v as a base64 value in XML string. -// It will auto close the parent xml element tag. -func (xv Value) Base64EncodeBytes(v []byte) { - encodeByteSlice(xv.w, (*xv.scratch)[:0], v) - xv.Close() -} - -// BigInteger encodes v big.Int as XML value. -// It will auto close the parent xml element tag. -func (xv Value) BigInteger(v *big.Int) { - xv.w.Write([]byte(v.Text(10))) - xv.Close() -} - -// BigDecimal encodes v big.Float as XML value. -// It will auto close the parent xml element tag. -func (xv Value) BigDecimal(v *big.Float) { - if i, accuracy := v.Int64(); accuracy == big.Exact { - xv.Long(i) - return - } - - xv.w.Write([]byte(v.Text('e', -1))) - xv.Close() -} - -// Write writes v directly to the xml document -// if escapeXMLText is set to true, write will escape text. -// It will auto close the parent xml element tag. -func (xv Value) Write(v []byte, escapeXMLText bool) { - // escape and write xml text - if escapeXMLText { - escapeText(xv.w, v) - } else { - // write xml directly - xv.w.Write(v) - } - - xv.Close() -} - -// MemberElement does member element encoding. It returns a Value. -// Member Element method should be used for all shapes except flattened shapes. -// -// A call to MemberElement will write nested element tags directly using the -// provided start element. The value returned by MemberElement should be closed. -func (xv Value) MemberElement(element StartElement) Value { - return newValue(xv.w, xv.scratch, element) -} - -// FlattenedElement returns flattened element encoding. It returns a Value. -// This method should be used for flattened shapes. -// -// Unlike MemberElement, flattened element will NOT write element tags -// directly for the associated start element. -// -// The value returned by the FlattenedElement does not need to be closed. -func (xv Value) FlattenedElement(element StartElement) Value { - v := newFlattenedValue(xv.w, xv.scratch, element) - v.isFlattened = true - return v -} - -// Array returns an array encoder. 
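BigDecimal above takes a fast path when the big.Float holds an exactly representable integer, and otherwise falls back to minimal scientific notation. A compact illustration of that accuracy check, using only the standard library:

package main

import (
	"fmt"
	"math/big"
)

func render(v *big.Float) string {
	// Exactly representable integers take the integer path, as in
	// Value.BigDecimal above; everything else uses 'e' notation with
	// the minimum digits needed to round-trip.
	if i, accuracy := v.Int64(); accuracy == big.Exact {
		return fmt.Sprintf("%d", i)
	}
	return v.Text('e', -1)
}

func main() {
	fmt.Println(render(big.NewFloat(42)))   // 42
	fmt.Println(render(big.NewFloat(1.25))) // 1.25e+00
}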
By default, the members of array are -// wrapped with `` element tag. -// If value is marked as flattened, the start element is used to wrap the members instead of -// the `` element. -func (xv Value) Array() *Array { - return newArray(xv.w, xv.scratch, arrayMemberWrapper, xv.startElement, xv.isFlattened) -} - -/* -ArrayWithCustomName returns an array encoder. - -It takes named start element as an argument, the named start element will used to wrap xml array entries. -for eg, `entry1` -Here `customName` named start element will be wrapped on each array member. -*/ -func (xv Value) ArrayWithCustomName(element StartElement) *Array { - return newArray(xv.w, xv.scratch, element, xv.startElement, xv.isFlattened) -} - -/* -Map returns a map encoder. By default, the map entries are -wrapped with `` element tag. - -If value is marked as flattened, the start element is used to wrap the entry instead of -the `` element. -*/ -func (xv Value) Map() *Map { - // flattened map - if xv.isFlattened { - return newFlattenedMap(xv.w, xv.scratch, xv.startElement) - } - - // un-flattened map - return newMap(xv.w, xv.scratch) -} - -// encodeByteSlice is modified copy of json encoder's encodeByteSlice. -// It is used to base64 encode a byte slice. -func encodeByteSlice(w writer, scratch []byte, v []byte) { - if v == nil { - return - } - - encodedLen := base64.StdEncoding.EncodedLen(len(v)) - if encodedLen <= len(scratch) { - // If the encoded bytes fit in e.scratch, avoid an extra - // allocation and use the cheaper Encoding.Encode. - dst := scratch[:encodedLen] - base64.StdEncoding.Encode(dst, v) - w.Write(dst) - } else if encodedLen <= 1024 { - // The encoded bytes are short enough to allocate for, and - // Encoding.Encode is still cheaper. - dst := make([]byte, encodedLen) - base64.StdEncoding.Encode(dst, v) - w.Write(dst) - } else { - // The encoded bytes are too long to cheaply allocate, and - // Encoding.Encode is no longer noticeably cheaper. - enc := base64.NewEncoder(base64.StdEncoding, w) - enc.Write(v) - enc.Close() - } -} - -// IsFlattened returns true if value is for flattened shape. -func (xv Value) IsFlattened() bool { - return xv.isFlattened -} - -// Close closes the value. -func (xv Value) Close() { - writeEndElement(xv.w, xv.startElement.End()) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go deleted file mode 100644 index dc4eebdffa..0000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go +++ /dev/null @@ -1,154 +0,0 @@ -package xml - -import ( - "encoding/xml" - "fmt" - "strings" -) - -// NodeDecoder is a XML decoder wrapper that is responsible to decoding -// a single XML Node element and it's nested member elements. This wrapper decoder -// takes in the start element of the top level node being decoded. -type NodeDecoder struct { - Decoder *xml.Decoder - StartEl xml.StartElement -} - -// WrapNodeDecoder returns an initialized XMLNodeDecoder -func WrapNodeDecoder(decoder *xml.Decoder, startEl xml.StartElement) NodeDecoder { - return NodeDecoder{ - Decoder: decoder, - StartEl: startEl, - } -} - -// Token on a Node Decoder returns a xml StartElement. 
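encodeByteSlice above picks among three strategies by encoded length: reuse the scratch buffer, make a small one-off allocation, or stream through base64.NewEncoder once the payload is large enough that copying no longer pays for itself. The same ladder in a self-contained sketch (the 1024-byte threshold is taken from the deleted code):

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
)

// base64To mirrors the three-tier strategy of encodeByteSlice: reuse
// scratch when the output fits, allocate for small payloads, and
// stream for anything large.
func base64To(w *bytes.Buffer, scratch, v []byte) {
	n := base64.StdEncoding.EncodedLen(len(v))
	switch {
	case n <= len(scratch):
		dst := scratch[:n]
		base64.StdEncoding.Encode(dst, v)
		w.Write(dst)
	case n <= 1024:
		dst := make([]byte, n)
		base64.StdEncoding.Encode(dst, v)
		w.Write(dst)
	default:
		enc := base64.NewEncoder(base64.StdEncoding, w)
		enc.Write(v)
		enc.Close() // flush any partial quantum
	}
}

func main() {
	var buf bytes.Buffer
	base64To(&buf, make([]byte, 64), []byte("hello"))
	fmt.Println(buf.String()) // aGVsbG8=
}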
It returns a boolean that indicates the -// a token is the node decoder's end node token; and an error which indicates any error -// that occurred while retrieving the start element -func (d NodeDecoder) Token() (t xml.StartElement, done bool, err error) { - for { - token, e := d.Decoder.Token() - if e != nil { - return t, done, e - } - - // check if we reach end of the node being decoded - if el, ok := token.(xml.EndElement); ok { - return t, el == d.StartEl.End(), err - } - - if t, ok := token.(xml.StartElement); ok { - return restoreAttrNamespaces(t), false, err - } - - // skip token if it is a comment or preamble or empty space value due to indentation - // or if it's a value and is not expected - } -} - -// restoreAttrNamespaces update XML attributes to restore the short namespaces found within -// the raw XML document. -func restoreAttrNamespaces(node xml.StartElement) xml.StartElement { - if len(node.Attr) == 0 { - return node - } - - // Generate a mapping of XML namespace values to their short names. - ns := map[string]string{} - for _, a := range node.Attr { - if a.Name.Space == "xmlns" { - ns[a.Value] = a.Name.Local - break - } - } - - for i, a := range node.Attr { - if a.Name.Space == "xmlns" { - continue - } - // By default, xml.Decoder will fully resolve these namespaces. So if you had - // then by default the second attribute would have the `Name.Space` resolved to `baz`. But we need it to - // continue to resolve as `bar` so we can easily identify it later on. - if v, ok := ns[node.Attr[i].Name.Space]; ok { - node.Attr[i].Name.Space = v - } - } - return node -} - -// GetElement looks for the given tag name at the current level, and returns the element if found, and -// skipping over non-matching elements. Returns an error if the node is not found, or if an error occurs while walking -// the document. -func (d NodeDecoder) GetElement(name string) (t xml.StartElement, err error) { - for { - token, done, err := d.Token() - if err != nil { - return t, err - } - if done { - return t, fmt.Errorf("%s node not found", name) - } - switch { - case strings.EqualFold(name, token.Name.Local): - return token, nil - default: - err = d.Decoder.Skip() - if err != nil { - return t, err - } - } - } -} - -// Value provides an abstraction to retrieve char data value within an xml element. -// The method will return an error if it encounters a nested xml element instead of char data. -// This method should only be used to retrieve simple type or blob shape values as []byte. -func (d NodeDecoder) Value() (c []byte, err error) { - t, e := d.Decoder.Token() - if e != nil { - return c, e - } - - endElement := d.StartEl.End() - - switch ev := t.(type) { - case xml.CharData: - c = ev.Copy() - case xml.EndElement: // end tag or self-closing - if ev == endElement { - return []byte{}, err - } - return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t) - default: - return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t) - } - - t, e = d.Decoder.Token() - if e != nil { - return c, e - } - - if ev, ok := t.(xml.EndElement); ok { - if ev == endElement { - return c, err - } - } - - return c, fmt.Errorf("expected end element %v, got %T type %v instead", endElement, t, t) -} - -// FetchRootElement takes in a decoder and returns the first start element within the xml body. 
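GetElement above is the usual token-walking pattern for encoding/xml: examine each sibling start element, return on a match, and Skip whole subtrees otherwise. A standalone sketch of that pattern against the standard-library decoder (element names here are illustrative):

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// findChild scans sibling elements, skipping non-matching subtrees,
// in the same spirit as NodeDecoder.GetElement above.
func findChild(d *xml.Decoder, name string) (xml.StartElement, error) {
	for {
		tok, err := d.Token()
		if err != nil {
			return xml.StartElement{}, err
		}
		switch t := tok.(type) {
		case xml.StartElement:
			if strings.EqualFold(t.Name.Local, name) {
				return t, nil
			}
			if err := d.Skip(); err != nil { // skip the whole subtree
				return xml.StartElement{}, err
			}
		case xml.EndElement:
			return xml.StartElement{}, fmt.Errorf("%s node not found", name)
		}
	}
}

func main() {
	d := xml.NewDecoder(strings.NewReader(`<r><a>1</a><b>2</b></r>`))
	d.Token() // consume <r>
	el, err := findChild(d, "b")
	fmt.Println(el.Name.Local, err) // b <nil>
}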
-// This function is useful in fetching the start element of an XML response and ignore the -// comments and preamble -func FetchRootElement(decoder *xml.Decoder) (startElement xml.StartElement, err error) { - for { - t, e := decoder.Token() - if e != nil { - return startElement, e - } - - if startElement, ok := t.(xml.StartElement); ok { - return startElement, err - } - } -} diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go deleted file mode 100644 index a935283974..0000000000 --- a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go +++ /dev/null @@ -1,23 +0,0 @@ -package transport - -import ( - "net/http" - "net/url" - - "github.com/aws/smithy-go" -) - -// Endpoint is the endpoint object returned by Endpoint resolution V2 -type Endpoint struct { - // The complete URL minimally specfiying the scheme and host. - // May optionally specify the port and base path component. - URI url.URL - - // An optional set of headers to be sent using transport layer headers. - Headers http.Header - - // A grab-bag property map of endpoint attributes. The - // values present here are subject to change, or being add/removed at any - // time. - Properties smithy.Properties -} diff --git a/vendor/github.com/aws/smithy-go/errors.go b/vendor/github.com/aws/smithy-go/errors.go deleted file mode 100644 index d6948d0206..0000000000 --- a/vendor/github.com/aws/smithy-go/errors.go +++ /dev/null @@ -1,137 +0,0 @@ -package smithy - -import "fmt" - -// APIError provides the generic API and protocol agnostic error type all SDK -// generated exception types will implement. -type APIError interface { - error - - // ErrorCode returns the error code for the API exception. - ErrorCode() string - // ErrorMessage returns the error message for the API exception. - ErrorMessage() string - // ErrorFault returns the fault for the API exception. - ErrorFault() ErrorFault -} - -// GenericAPIError provides a generic concrete API error type that SDKs can use -// to deserialize error responses into. Should be used for unmodeled or untyped -// errors. -type GenericAPIError struct { - Code string - Message string - Fault ErrorFault -} - -// ErrorCode returns the error code for the API exception. -func (e *GenericAPIError) ErrorCode() string { return e.Code } - -// ErrorMessage returns the error message for the API exception. -func (e *GenericAPIError) ErrorMessage() string { return e.Message } - -// ErrorFault returns the fault for the API exception. -func (e *GenericAPIError) ErrorFault() ErrorFault { return e.Fault } - -func (e *GenericAPIError) Error() string { - return fmt.Sprintf("api error %s: %s", e.Code, e.Message) -} - -var _ APIError = (*GenericAPIError)(nil) - -// OperationError decorates an underlying error which occurred while invoking -// an operation with names of the operation and API. -type OperationError struct { - ServiceID string - OperationName string - Err error -} - -// Service returns the name of the API service the error occurred with. -func (e *OperationError) Service() string { return e.ServiceID } - -// Operation returns the name of the API operation the error occurred with. -func (e *OperationError) Operation() string { return e.OperationName } - -// Unwrap returns the nested error if any, or nil. 
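In application code these types are usually consumed through errors.As, which walks OperationError's Unwrap chain until it finds a modeled APIError. A self-contained sketch using local stand-ins for the deleted types (the service name and error code are invented for illustration):

package main

import (
	"errors"
	"fmt"
)

// Minimal stand-ins mirroring the smithy types above.
type APIError interface {
	error
	ErrorCode() string
	ErrorMessage() string
}

type GenericAPIError struct{ Code, Message string }

func (e *GenericAPIError) Error() string        { return fmt.Sprintf("api error %s: %s", e.Code, e.Message) }
func (e *GenericAPIError) ErrorCode() string    { return e.Code }
func (e *GenericAPIError) ErrorMessage() string { return e.Message }

type OperationError struct {
	ServiceID, OperationName string
	Err                      error
}

func (e *OperationError) Error() string {
	return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err)
}
func (e *OperationError) Unwrap() error { return e.Err }

func main() {
	err := &OperationError{"ECR", "GetAuthorizationToken",
		&GenericAPIError{Code: "AccessDenied", Message: "not authorized"}}

	// Unwrap exposes the chain, so errors.As sees through OperationError.
	var ae APIError
	if errors.As(err, &ae) {
		fmt.Println(ae.ErrorCode()) // AccessDenied
	}
}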
-func (e *OperationError) Unwrap() error { return e.Err } - -func (e *OperationError) Error() string { - return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err) -} - -// DeserializationError provides a wrapper for an error that occurs during -// deserialization. -type DeserializationError struct { - Err error // original error - Snapshot []byte -} - -// Error returns a formatted error for DeserializationError -func (e *DeserializationError) Error() string { - const msg = "deserialization failed" - if e.Err == nil { - return msg - } - return fmt.Sprintf("%s, %v", msg, e.Err) -} - -// Unwrap returns the underlying Error in DeserializationError -func (e *DeserializationError) Unwrap() error { return e.Err } - -// ErrorFault provides the type for a Smithy API error fault. -type ErrorFault int - -// ErrorFault enumeration values -const ( - FaultUnknown ErrorFault = iota - FaultServer - FaultClient -) - -func (f ErrorFault) String() string { - switch f { - case FaultServer: - return "server" - case FaultClient: - return "client" - default: - return "unknown" - } -} - -// SerializationError represents an error that occurred while attempting to serialize a request -type SerializationError struct { - Err error // original error -} - -// Error returns a formatted error for SerializationError -func (e *SerializationError) Error() string { - const msg = "serialization failed" - if e.Err == nil { - return msg - } - return fmt.Sprintf("%s: %v", msg, e.Err) -} - -// Unwrap returns the underlying Error in SerializationError -func (e *SerializationError) Unwrap() error { return e.Err } - -// CanceledError is the error that will be returned by an API request that was -// canceled. API operations given a Context may return this error when -// canceled. -type CanceledError struct { - Err error -} - -// CanceledError returns true to satisfy interfaces checking for canceled errors. -func (*CanceledError) CanceledError() bool { return true } - -// Unwrap returns the underlying error, if there was one. -func (e *CanceledError) Unwrap() error { - return e.Err -} - -func (e *CanceledError) Error() string { - return fmt.Sprintf("canceled, %v", e.Err) -} diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go deleted file mode 100644 index 341392e10f..0000000000 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package smithy - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.20.1" diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE deleted file mode 100644 index fe6a62006a..0000000000 --- a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. 
- * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go deleted file mode 100644 index 9c9d02b94b..0000000000 --- a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package singleflight provides a duplicate function call suppression -// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight -// package. The package is forked, because the package a part of the unstable -// and unversioned golang.org/x/sync module. -// -// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight - -package singleflight diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go deleted file mode 100644 index e8a1b17d56..0000000000 --- a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package singleflight - -import ( - "bytes" - "errors" - "fmt" - "runtime" - "runtime/debug" - "sync" -) - -// errGoexit indicates the runtime.Goexit was called in -// the user given function. -var errGoexit = errors.New("runtime.Goexit was called") - -// A panicError is an arbitrary value recovered from a panic -// with the stack trace during the execution of given function. -type panicError struct { - value interface{} - stack []byte -} - -// Error implements error interface. -func (p *panicError) Error() string { - return fmt.Sprintf("%v\n\n%s", p.value, p.stack) -} - -func newPanicError(v interface{}) error { - stack := debug.Stack() - - // The first line of the stack trace is of the form "goroutine N [status]:" - // but by the time the panic reaches Do the goroutine may no longer exist - // and its status will have changed. Trim out the misleading line. - if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { - stack = stack[line+1:] - } - return &panicError{value: v, stack: stack} - -} - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - - // These fields are written once before the WaitGroup is done - // and are only read after the WaitGroup is done.
- val interface{} - err error - - // forgotten indicates whether Forget was called with this call's key - // while the call was still in flight. - forgotten bool - - // These fields are read and written with the singleflight - // mutex held before the WaitGroup is done, and are read but - // not written after the WaitGroup is done. - dups int - chans []chan<- Result -} - -// Group represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Result holds the results of Do, so they can be passed -// on a channel. -type Result struct { - Val interface{} - Err error - Shared bool -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.mu.Unlock() - c.wg.Wait() - - if e, ok := c.err.(*panicError); ok { - panic(e) - } else if c.err == errGoexit { - runtime.Goexit() - } - return c.val, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - g.doCall(c, key, fn) - return c.val, c.err, c.dups > 0 -} - -// DoChan is like Do but returns a channel that will receive the -// results when they are ready. -// -// The returned channel will not be closed. -func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { - ch := make(chan Result, 1) - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - c.chans = append(c.chans, ch) - g.mu.Unlock() - return ch - } - c := &call{chans: []chan<- Result{ch}} - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - go g.doCall(c, key, fn) - - return ch -} - -// doCall handles the single call for a key. -func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { - normalReturn := false - recovered := false - - // use double-defer to distinguish panic from runtime.Goexit, - // more details see https://golang.org/cl/134395 - defer func() { - // the given function invoked runtime.Goexit - if !normalReturn && !recovered { - c.err = errGoexit - } - - c.wg.Done() - g.mu.Lock() - defer g.mu.Unlock() - if !c.forgotten { - delete(g.m, key) - } - - if e, ok := c.err.(*panicError); ok { - // In order to prevent the waiting channels from being blocked forever, - // needs to ensure that this panic cannot be recovered. - if len(c.chans) > 0 { - go panic(e) - select {} // Keep this goroutine around so that it will appear in the crash dump. - } else { - panic(e) - } - } else if c.err == errGoexit { - // Already in the process of goexit, no need to call again - } else { - // Normal return - for _, ch := range c.chans { - ch <- Result{c.val, c.err, c.dups > 0} - } - } - }() - - func() { - defer func() { - if !normalReturn { - // Ideally, we would wait to take a stack trace until we've determined - // whether this is a panic or a runtime.Goexit. 
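Do above guarantees one in-flight execution per key: late arrivals block on the WaitGroup and receive the shared result. Since this fork is internal to smithy-go, the sketch below uses the upstream golang.org/x/sync/singleflight package it was forked from; the behavior relevant here is identical.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var calls int32
	var wg sync.WaitGroup

	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All five goroutines share one execution while it is in flight.
			v, _, _ := g.Do("refresh-token", func() (interface{}, error) {
				atomic.AddInt32(&calls, 1)
				return "credentials", nil
			})
			_ = v
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt32(&calls), "execution(s)") // 1 when the calls overlap
}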
- // - // Unfortunately, the only way we can distinguish the two is to see - // whether the recover stopped the goroutine from terminating, and by - // the time we know that, the part of the stack trace relevant to the - // panic has been discarded. - if r := recover(); r != nil { - c.err = newPanicError(r) - } - } - }() - - c.val, c.err = fn() - normalReturn = true - }() - - if !normalReturn { - recovered = true - } -} - -// Forget tells the singleflight to forget about a key. Future calls -// to Do for this key will call the function rather than waiting for -// an earlier call to complete. -func (g *Group) Forget(key string) { - g.mu.Lock() - if c, ok := g.m[key]; ok { - c.forgotten = true - } - delete(g.m, key) - g.mu.Unlock() -} diff --git a/vendor/github.com/aws/smithy-go/io/byte.go b/vendor/github.com/aws/smithy-go/io/byte.go deleted file mode 100644 index f8417c15b8..0000000000 --- a/vendor/github.com/aws/smithy-go/io/byte.go +++ /dev/null @@ -1,12 +0,0 @@ -package io - -const ( - // Byte is 8 bits - Byte int64 = 1 - // KibiByte (KiB) is 1024 Bytes - KibiByte = Byte * 1024 - // MebiByte (MiB) is 1024 KiB - MebiByte = KibiByte * 1024 - // GibiByte (GiB) is 1024 MiB - GibiByte = MebiByte * 1024 -) diff --git a/vendor/github.com/aws/smithy-go/io/doc.go b/vendor/github.com/aws/smithy-go/io/doc.go deleted file mode 100644 index a6a33eaf56..0000000000 --- a/vendor/github.com/aws/smithy-go/io/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package io provides utilities for Smithy generated API clients. -package io diff --git a/vendor/github.com/aws/smithy-go/io/reader.go b/vendor/github.com/aws/smithy-go/io/reader.go deleted file mode 100644 index 07063f2960..0000000000 --- a/vendor/github.com/aws/smithy-go/io/reader.go +++ /dev/null @@ -1,16 +0,0 @@ -package io - -import ( - "io" -) - -// ReadSeekNopCloser wraps an io.ReadSeeker with an additional Close method -// that does nothing. -type ReadSeekNopCloser struct { - io.ReadSeeker -} - -// Close does nothing. -func (ReadSeekNopCloser) Close() error { - return nil -} diff --git a/vendor/github.com/aws/smithy-go/io/ringbuffer.go b/vendor/github.com/aws/smithy-go/io/ringbuffer.go deleted file mode 100644 index 06b476add8..0000000000 --- a/vendor/github.com/aws/smithy-go/io/ringbuffer.go +++ /dev/null @@ -1,94 +0,0 @@ -package io - -import ( - "bytes" - "io" -) - -// RingBuffer struct satisfies io.ReadWrite interface. -// -// ReadBuffer is a revolving buffer data structure, which can be used to store snapshots of data in a -// revolving window. -type RingBuffer struct { - slice []byte - start int - end int - size int -} - -// NewRingBuffer method takes in a byte slice as an input and returns a RingBuffer. -func NewRingBuffer(slice []byte) *RingBuffer { - ringBuf := RingBuffer{ - slice: slice, - } - return &ringBuf -} - -// Write method inserts the elements in a byte slice, and returns the number of bytes written along with any error. 
-func (r *RingBuffer) Write(p []byte) (int, error) { - for _, b := range p { - // check if end points to invalid index, we need to circle back - if r.end == len(r.slice) { - r.end = 0 - } - // check if start points to invalid index, we need to circle back - if r.start == len(r.slice) { - r.start = 0 - } - // if ring buffer is filled, increment the start index - if r.size == len(r.slice) { - r.size-- - r.start++ - } - - r.slice[r.end] = b - r.end++ - r.size++ - } - return len(p), nil -} - -// Read copies the data on the ring buffer into the byte slice provided to the method. -// Returns the read count along with any error encountered while reading. -func (r *RingBuffer) Read(p []byte) (int, error) { - // readCount keeps track of the number of bytes read - var readCount int - for j := 0; j < len(p); j++ { - // if ring buffer is empty or completely read - // return EOF error. - if r.size == 0 { - return readCount, io.EOF - } - - if r.start == len(r.slice) { - r.start = 0 - } - - p[j] = r.slice[r.start] - readCount++ - // increment the start pointer for ring buffer - r.start++ - // decrement the size of ring buffer - r.size-- - } - return readCount, nil -} - -// Len returns the number of unread bytes in the buffer. -func (r *RingBuffer) Len() int { - return r.size -} - -// Bytes returns a copy of the RingBuffer's bytes. -func (r RingBuffer) Bytes() []byte { - var b bytes.Buffer - io.Copy(&b, &r) - return b.Bytes() -} - -// Reset resets the ring buffer. -func (r *RingBuffer) Reset() { - *r = RingBuffer{ - slice: r.slice, - } -} diff --git a/vendor/github.com/aws/smithy-go/local-mod-replace.sh b/vendor/github.com/aws/smithy-go/local-mod-replace.sh deleted file mode 100644 index 800bf37695..0000000000 --- a/vendor/github.com/aws/smithy-go/local-mod-replace.sh +++ /dev/null @@ -1,39 +0,0 @@ -#1/usr/bin/env bash - -PROJECT_DIR="" -SMITHY_SOURCE_DIR=$(cd `dirname $0` && pwd) - -usage() { - echo "Usage: $0 [-s SMITHY_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2 - exit 1 -} - -while getopts "hs:d:" options; do - case "${options}" in - s) - SMITHY_SOURCE_DIR=${OPTARG} - if [ "$SMITHY_SOURCE_DIR" == "" ]; then - echo "path to smithy-go source directory is required" || exit - usage - fi - ;; - d) - PROJECT_DIR=${OPTARG} - ;; - h) - usage - ;; - *) - usage - ;; - esac -done - -if [ "$PROJECT_DIR" != "" ]; then - cd $PROJECT_DIR || exit -fi - -go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/smithy-go" | while read x; do - repPath=${x/github.com\/aws\/smithy-go/${SMITHY_SOURCE_DIR}} - echo -replace $x=$repPath -done | xargs go mod edit diff --git a/vendor/github.com/aws/smithy-go/logging/logger.go b/vendor/github.com/aws/smithy-go/logging/logger.go deleted file mode 100644 index 2071924bd3..0000000000 --- a/vendor/github.com/aws/smithy-go/logging/logger.go +++ /dev/null @@ -1,82 +0,0 @@ -package logging - -import ( - "context" - "io" - "log" -) - -// Classification is the type of the log entry's classification name. -type Classification string - -// Set of standard classifications that can be used by clients and middleware -const ( - Warn Classification = "WARN" - Debug Classification = "DEBUG" -) - -// Logger is an interface for logging entries at certain classifications. -type Logger interface { - // Logf is expected to support the standard fmt package "verbs". - Logf(classification Classification, format string, v ...interface{}) -} - -// LoggerFunc is a wrapper around a function to satisfy the Logger interface. 
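RingBuffer above is what the SDK uses to retain a bounded snapshot of a request or response body for error reporting: once the slice is full, each new byte overwrites the oldest. A usage sketch, assuming the canonical module path github.com/aws/smithy-go/io rather than the github.com mirror shown in the vendor paths:

package main

import (
	"fmt"

	smithyio "github.com/aws/smithy-go/io"
)

func main() {
	// An 8-byte window over the stream: the buffer always holds the
	// most recent 8 bytes written.
	rb := smithyio.NewRingBuffer(make([]byte, 8))

	rb.Write([]byte("0123456789ab")) // 12 bytes into an 8-byte ring
	fmt.Println(string(rb.Bytes()))  // 456789ab: the first four bytes rolled off
}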
-type LoggerFunc func(classification Classification, format string, v ...interface{}) - -// Logf delegates the logging request to the wrapped function. -func (f LoggerFunc) Logf(classification Classification, format string, v ...interface{}) { - f(classification, format, v...) -} - -// ContextLogger is an optional interface a Logger implementation may expose that provides -// the ability to create context aware log entries. -type ContextLogger interface { - WithContext(context.Context) Logger -} - -// WithContext will pass the provided context to logger if it implements the ContextLogger interface and return the resulting -// logger. Otherwise the logger will be returned as is. As a special case if a nil logger is provided, a Nop logger will -// be returned to the caller. -func WithContext(ctx context.Context, logger Logger) Logger { - if logger == nil { - return Nop{} - } - - cl, ok := logger.(ContextLogger) - if !ok { - return logger - } - - return cl.WithContext(ctx) -} - -// Nop is a Logger implementation that simply does not perform any logging. -type Nop struct{} - -// Logf simply returns without performing any action -func (n Nop) Logf(Classification, string, ...interface{}) { - return -} - -// StandardLogger is a Logger implementation that wraps the standard library logger, and delegates logging to it's -// Printf method. -type StandardLogger struct { - Logger *log.Logger -} - -// Logf logs the given classification and message to the underlying logger. -func (s StandardLogger) Logf(classification Classification, format string, v ...interface{}) { - if len(classification) != 0 { - format = string(classification) + " " + format - } - - s.Logger.Printf(format, v...) -} - -// NewStandardLogger returns a new StandardLogger -func NewStandardLogger(writer io.Writer) *StandardLogger { - return &StandardLogger{ - Logger: log.New(writer, "SDK ", log.LstdFlags), - } -} diff --git a/vendor/github.com/aws/smithy-go/middleware/doc.go b/vendor/github.com/aws/smithy-go/middleware/doc.go deleted file mode 100644 index 9858928a7f..0000000000 --- a/vendor/github.com/aws/smithy-go/middleware/doc.go +++ /dev/null @@ -1,67 +0,0 @@ -// Package middleware provides transport agnostic middleware for decorating SDK -// handlers. -// -// The Smithy middleware stack provides ordered behavior to be invoked on an -// underlying handler. The stack is separated into steps that are invoked in a -// static order. A step is a collection of middleware that are injected into a -// ordered list defined by the user. The user may add, insert, swap, and remove a -// step's middleware. When the stack is invoked the step middleware become static, -// and their order cannot be modified. -// -// A stack and its step middleware are **not** safe to modify concurrently. -// -// A stack will use the ordered list of middleware to decorate a underlying -// handler. A handler could be something like an HTTP Client that round trips an -// API operation over HTTP. -// -// Smithy Middleware Stack -// -// A Stack is a collection of middleware that wrap a handler. The stack can be -// broken down into discreet steps. Each step may contain zero or more middleware -// specific to that stack's step. -// -// A Stack Step is a predefined set of middleware that are invoked in a static -// order by the Stack. These steps represent fixed points in the middleware stack -// for organizing specific behavior, such as serialize and build. A Stack Step is -// composed of zero or more middleware that are specific to that step. 
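The logging package above is deliberately tiny: one Logf interface, a function adapter, and a standard-library-backed default. A usage sketch, assuming the canonical import path github.com/aws/smithy-go/logging:

package main

import (
	"os"

	"github.com/aws/smithy-go/logging"
)

func main() {
	// Wraps the standard library logger; entries come out as
	// "SDK <date> <time> WARN retrying request".
	logger := logging.NewStandardLogger(os.Stderr)
	logger.Logf(logging.Warn, "retrying request")

	// A LoggerFunc adapts any closure to the interface, handy in tests.
	quiet := logging.LoggerFunc(func(logging.Classification, string, ...interface{}) {})
	quiet.Logf(logging.Debug, "dropped")
}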
A step may -// define its own set of input/output parameters the generic input/output -// parameters are cast from. A step calls its middleware recursively, before -// calling the next step in the stack returning the result or error of the step -// middleware decorating the underlying handler. -// -// * Initialize: Prepares the input, and sets any default parameters as needed, -// (e.g. idempotency token, and presigned URLs). -// -// * Serialize: Serializes the prepared input into a data structure that can be -// consumed by the target transport's message, (e.g. REST-JSON serialization). -// -// * Build: Adds additional metadata to the serialized transport message, (e.g. -// HTTP's Content-Length header, or body checksum). Decorations and -// modifications to the message should be copied to all message attempts. -// -// * Finalize: Performs final preparations needed before sending the message. The -// message should already be complete by this stage, and is only alternated to -// meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request -// signing). -// -// * Deserialize: Reacts to the handler's response returned by the recipient of -// the request message. Deserializes the response into a structured type or -// error above stacks can react to. -// -// Adding Middleware to a Stack Step -// -// Middleware can be added to a step front or back, or relative, by name, to an -// existing middleware in that stack. If a middleware does not have a name, a -// unique name will be generated at the middleware and be added to the step. -// -// // Create middleware stack -// stack := middleware.NewStack() -// -// // Add middleware to stack steps -// stack.Initialize.Add(paramValidationMiddleware, middleware.After) -// stack.Serialize.Add(marshalOperationFoo, middleware.After) -// stack.Deserialize.Add(unmarshalOperationFoo, middleware.After) -// -// // Invoke middleware on handler. -// resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler) -package middleware diff --git a/vendor/github.com/aws/smithy-go/middleware/logging.go b/vendor/github.com/aws/smithy-go/middleware/logging.go deleted file mode 100644 index c2f0dbb6bd..0000000000 --- a/vendor/github.com/aws/smithy-go/middleware/logging.go +++ /dev/null @@ -1,46 +0,0 @@ -package middleware - -import ( - "context" - - "github.com/aws/smithy-go/logging" -) - -// loggerKey is the context value key for which the logger is associated with. -type loggerKey struct{} - -// GetLogger takes a context to retrieve a Logger from. If no logger is present on the context a logging.Nop logger -// is returned. If the logger retrieved from context supports the ContextLogger interface, the context will be passed -// to the WithContext method and the resulting logger will be returned. Otherwise the stored logger is returned as is. -func GetLogger(ctx context.Context) logging.Logger { - logger, ok := ctx.Value(loggerKey{}).(logging.Logger) - if !ok || logger == nil { - return logging.Nop{} - } - - return logging.WithContext(ctx, logger) -} - -// SetLogger sets the provided logger value on the provided ctx. -func SetLogger(ctx context.Context, logger logging.Logger) context.Context { - return context.WithValue(ctx, loggerKey{}, logger) -} - -type setLogger struct { - Logger logging.Logger -} - -// AddSetLoggerMiddleware adds a middleware that will add the provided logger to the middleware context. 
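GetLogger and SetLogger above make the logger a context value, so any middleware can log without a logger parameter being plumbed through the stack; absent a logger, GetLogger degrades to a no-op rather than a nil dereference. A sketch, with canonical import paths assumed:

package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/logging"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	ctx := context.Background()

	// Without SetLogger, GetLogger returns a Nop logger: this line is silent.
	middleware.GetLogger(ctx).Logf(logging.Debug, "silently dropped")

	logger := logging.LoggerFunc(func(c logging.Classification, format string, v ...interface{}) {
		fmt.Printf("%s %s\n", c, fmt.Sprintf(format, v...))
	})
	ctx = middleware.SetLogger(ctx, logger)
	middleware.GetLogger(ctx).Logf(logging.Warn, "attempt %d failed", 2) // WARN attempt 2 failed
}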
-func AddSetLoggerMiddleware(stack *Stack, logger logging.Logger) error { - return stack.Initialize.Add(&setLogger{Logger: logger}, After) -} - -func (a *setLogger) ID() string { - return "SetLogger" -} - -func (a *setLogger) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( - out InitializeOutput, metadata Metadata, err error, -) { - return next.HandleInitialize(SetLogger(ctx, a.Logger), in) -} diff --git a/vendor/github.com/aws/smithy-go/middleware/metadata.go b/vendor/github.com/aws/smithy-go/middleware/metadata.go deleted file mode 100644 index 7bb7dbcf5a..0000000000 --- a/vendor/github.com/aws/smithy-go/middleware/metadata.go +++ /dev/null @@ -1,65 +0,0 @@ -package middleware - -// MetadataReader provides an interface for reading metadata from the -// underlying metadata container. -type MetadataReader interface { - Get(key interface{}) interface{} -} - -// Metadata provides storing and reading metadata values. Keys may be any -// comparable value type. Get and set will panic if key is not a comparable -// value type. -// -// Metadata uses lazy initialization, and Set method must be called as an -// addressable value, or pointer. Not doing so may cause key/value pair to not -// be set. -type Metadata struct { - values map[interface{}]interface{} -} - -// Get attempts to retrieve the value the key points to. Returns nil if the -// key was not found. -// -// Panics if key type is not comparable. -func (m Metadata) Get(key interface{}) interface{} { - return m.values[key] -} - -// Clone creates a shallow copy of Metadata entries, returning a new Metadata -// value with the original entries copied into it. -func (m Metadata) Clone() Metadata { - vs := make(map[interface{}]interface{}, len(m.values)) - for k, v := range m.values { - vs[k] = v - } - - return Metadata{ - values: vs, - } -} - -// Set stores the value pointed to by the key. If a value already exists at -// that key it will be replaced with the new value. -// -// Set method must be called as an addressable value, or pointer. If Set is not -// called as an addressable value or pointer, the key value pair being set may -// be lost. -// -// Panics if the key type is not comparable. -func (m *Metadata) Set(key, value interface{}) { - if m.values == nil { - m.values = map[interface{}]interface{}{} - } - m.values[key] = value -} - -// Has returns whether the key exists in the metadata. -// -// Panics if the key type is not comparable. -func (m Metadata) Has(key interface{}) bool { - if m.values == nil { - return false - } - _, ok := m.values[key] - return ok -} diff --git a/vendor/github.com/aws/smithy-go/middleware/middleware.go b/vendor/github.com/aws/smithy-go/middleware/middleware.go deleted file mode 100644 index 803b7c7518..0000000000 --- a/vendor/github.com/aws/smithy-go/middleware/middleware.go +++ /dev/null @@ -1,71 +0,0 @@ -package middleware - -import ( - "context" -) - -// Handler provides the interface for performing the logic to obtain an output, -// or error for the given input. -type Handler interface { - // Handle performs logic to obtain an output for the given input. Handler - // should be decorated with middleware to perform input specific behavior. - Handle(ctx context.Context, input interface{}) ( - output interface{}, metadata Metadata, err error, - ) -} - -// HandlerFunc provides a wrapper around a function pointer to be used as a -// middleware handler. 
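Two details of Metadata above deserve emphasis: keys should be unexported struct types so entries cannot collide across packages, and Set must be called through an addressable value because the underlying map is lazily initialized. A sketch (the key type is illustrative):

package main

import (
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// Unexported key types keep entries from colliding across packages.
type requestIDKey struct{}

func main() {
	var md middleware.Metadata

	// Set must be invoked on an addressable value (a variable or a
	// *Metadata); calling it on a copy would silently drop the entry.
	md.Set(requestIDKey{}, "abc-123")

	if md.Has(requestIDKey{}) {
		fmt.Println(md.Get(requestIDKey{})) // abc-123
	}
}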
-type HandlerFunc func(ctx context.Context, input interface{}) ( - output interface{}, metadata Metadata, err error, -) - -// Handle invokes the underlying function, returning the result. -func (fn HandlerFunc) Handle(ctx context.Context, input interface{}) ( - output interface{}, metadata Metadata, err error, -) { - return fn(ctx, input) -} - -// Middleware provides the interface to call handlers in a chain. -type Middleware interface { - // ID provides a unique identifier for the middleware. - ID() string - - // Performs the middleware's handling of the input, returning the output, - // or error. The middleware can invoke the next Handler if handling should - // continue. - HandleMiddleware(ctx context.Context, input interface{}, next Handler) ( - output interface{}, metadata Metadata, err error, - ) -} - -// decoratedHandler wraps a middleware in order to to call the next handler in -// the chain. -type decoratedHandler struct { - // The next handler to be called. - Next Handler - - // The current middleware decorating the handler. - With Middleware -} - -// Handle implements the Handler interface to handle a operation invocation. -func (m decoratedHandler) Handle(ctx context.Context, input interface{}) ( - output interface{}, metadata Metadata, err error, -) { - return m.With.HandleMiddleware(ctx, input, m.Next) -} - -// DecorateHandler decorates a handler with a middleware. Wrapping the handler -// with the middleware. -func DecorateHandler(h Handler, with ...Middleware) Handler { - for i := len(with) - 1; i >= 0; i-- { - h = decoratedHandler{ - Next: h, - With: with[i], - } - } - - return h -} diff --git a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go deleted file mode 100644 index 4b195308c5..0000000000 --- a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go +++ /dev/null @@ -1,268 +0,0 @@ -package middleware - -import "fmt" - -// RelativePosition provides specifying the relative position of a middleware -// in an ordered group. -type RelativePosition int - -// Relative position for middleware in steps. -const ( - After RelativePosition = iota - Before -) - -type ider interface { - ID() string -} - -// orderedIDs provides an ordered collection of items with relative ordering -// by name. -type orderedIDs struct { - order *relativeOrder - items map[string]ider -} - -const baseOrderedItems = 5 - -func newOrderedIDs() *orderedIDs { - return &orderedIDs{ - order: newRelativeOrder(), - items: make(map[string]ider, baseOrderedItems), - } -} - -// Add injects the item to the relative position of the item group. Returns an -// error if the item already exists. -func (g *orderedIDs) Add(m ider, pos RelativePosition) error { - id := m.ID() - if len(id) == 0 { - return fmt.Errorf("empty ID, ID must not be empty") - } - - if err := g.order.Add(pos, id); err != nil { - return err - } - - g.items[id] = m - return nil -} - -// Insert injects the item relative to an existing item id. Returns an error if -// the original item does not exist, or the item being added already exists. 
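DecorateHandler above composes middleware like a call stack: the first middleware passed ends up outermost, so input flows through it first on the way down to the handler. A runnable sketch (the tag middleware is invented for illustration):

package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// tag records the order in which the input passes through the chain.
type tag struct{ id string }

func (t tag) ID() string { return t.id }

func (t tag) HandleMiddleware(ctx context.Context, in interface{}, next middleware.Handler) (
	interface{}, middleware.Metadata, error,
) {
	return next.Handle(ctx, fmt.Sprintf("%s(%v)", t.id, in))
}

func main() {
	terminal := middleware.HandlerFunc(func(ctx context.Context, in interface{}) (
		interface{}, middleware.Metadata, error,
	) {
		return in, middleware.Metadata{}, nil
	})

	h := middleware.DecorateHandler(terminal, tag{"outer"}, tag{"inner"})
	out, _, _ := h.Handle(context.Background(), "req")
	fmt.Println(out) // inner(outer(req)): outer transformed the input first
}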
-func (g *orderedIDs) Insert(m ider, relativeTo string, pos RelativePosition) error { - if len(m.ID()) == 0 { - return fmt.Errorf("insert ID must not be empty") - } - if len(relativeTo) == 0 { - return fmt.Errorf("relative to ID must not be empty") - } - - if err := g.order.Insert(relativeTo, pos, m.ID()); err != nil { - return err - } - - g.items[m.ID()] = m - return nil -} - -// Get returns the ider identified by id. If ider is not present, returns false. -func (g *orderedIDs) Get(id string) (ider, bool) { - v, ok := g.items[id] - return v, ok -} - -// Swap removes the item by id, replacing it with the new item. Returns an error -// if the original item doesn't exist. -func (g *orderedIDs) Swap(id string, m ider) (ider, error) { - if len(id) == 0 { - return nil, fmt.Errorf("swap from ID must not be empty") - } - - iderID := m.ID() - if len(iderID) == 0 { - return nil, fmt.Errorf("swap to ID must not be empty") - } - - if err := g.order.Swap(id, iderID); err != nil { - return nil, err - } - - removed := g.items[id] - - delete(g.items, id) - g.items[iderID] = m - - return removed, nil -} - -// Remove removes the item by id. Returns an error if the item -// doesn't exist. -func (g *orderedIDs) Remove(id string) (ider, error) { - if len(id) == 0 { - return nil, fmt.Errorf("remove ID must not be empty") - } - - if err := g.order.Remove(id); err != nil { - return nil, err - } - - removed := g.items[id] - delete(g.items, id) - return removed, nil -} - -func (g *orderedIDs) List() []string { - items := g.order.List() - order := make([]string, len(items)) - copy(order, items) - return order -} - -// Clear removes all entries and slots. -func (g *orderedIDs) Clear() { - g.order.Clear() - g.items = map[string]ider{} -} - -// GetOrder returns the item in the order it should be invoked in. -func (g *orderedIDs) GetOrder() []interface{} { - order := g.order.List() - ordered := make([]interface{}, len(order)) - for i := 0; i < len(order); i++ { - ordered[i] = g.items[order[i]] - } - - return ordered -} - -// relativeOrder provides ordering of item -type relativeOrder struct { - order []string -} - -func newRelativeOrder() *relativeOrder { - return &relativeOrder{ - order: make([]string, 0, baseOrderedItems), - } -} - -// Add inserts an item into the order relative to the position provided. -func (s *relativeOrder) Add(pos RelativePosition, ids ...string) error { - if len(ids) == 0 { - return nil - } - - for _, id := range ids { - if _, ok := s.has(id); ok { - return fmt.Errorf("already exists, %v", id) - } - } - - switch pos { - case Before: - return s.insert(0, Before, ids...) - - case After: - s.order = append(s.order, ids...) - - default: - return fmt.Errorf("invalid position, %v", int(pos)) - } - - return nil -} - -// Insert injects an item before or after the relative item. Returns -// an error if the relative item does not exist. -func (s *relativeOrder) Insert(relativeTo string, pos RelativePosition, ids ...string) error { - if len(ids) == 0 { - return nil - } - - for _, id := range ids { - if _, ok := s.has(id); ok { - return fmt.Errorf("already exists, %v", id) - } - } - - i, ok := s.has(relativeTo) - if !ok { - return fmt.Errorf("not found, %v", relativeTo) - } - - return s.insert(i, pos, ids...) -} - -// Swap will replace the item id with the to item. Returns an -// error if the original item id does not exist. Allows swapping out an -// item for another item with the same id. 
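Add and Insert above both bottom out in the insert helper defined just below, whose Before branch reuses spare slice capacity when it can. Ignoring that optimization, the splice it performs is equivalent to this sketch (the middleware names are illustrative):

package main

import "fmt"

// insertBefore returns order with ids spliced in ahead of index i,
// the net effect of relativeOrder.insert minus its capacity fast path.
func insertBefore(order []string, i int, ids ...string) []string {
	out := make([]string, 0, len(order)+len(ids))
	out = append(out, order[:i]...)
	out = append(out, ids...)
	return append(out, order[i:]...)
}

func main() {
	order := []string{"ResolveEndpoint", "Signing"}
	fmt.Println(insertBefore(order, 1, "ComputeChecksum"))
	// [ResolveEndpoint ComputeChecksum Signing]
}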
-func (s *relativeOrder) Swap(id, to string) error { - i, ok := s.has(id) - if !ok { - return fmt.Errorf("not found, %v", id) - } - - if _, ok = s.has(to); ok && id != to { - return fmt.Errorf("already exists, %v", to) - } - - s.order[i] = to - return nil -} - -func (s *relativeOrder) Remove(id string) error { - i, ok := s.has(id) - if !ok { - return fmt.Errorf("not found, %v", id) - } - - s.order = append(s.order[:i], s.order[i+1:]...) - return nil -} - -func (s *relativeOrder) List() []string { - return s.order -} - -func (s *relativeOrder) Clear() { - s.order = s.order[0:0] -} - -func (s *relativeOrder) insert(i int, pos RelativePosition, ids ...string) error { - switch pos { - case Before: - n := len(ids) - var src []string - if n <= cap(s.order)-len(s.order) { - s.order = s.order[:len(s.order)+n] - src = s.order - } else { - src = s.order - s.order = make([]string, len(s.order)+n) - copy(s.order[:i], src[:i]) // only when allocating a new slice do we need to copy the front half - } - copy(s.order[i+n:], src[i:]) - copy(s.order[i:], ids) - case After: - if i == len(s.order)-1 || len(s.order) == 0 { - s.order = append(s.order, ids...) - } else { - s.order = append(s.order[:i+1], append(ids, s.order[i+1:]...)...) - } - - default: - return fmt.Errorf("invalid position, %v", int(pos)) - } - - return nil -} - -func (s *relativeOrder) has(id string) (i int, found bool) { - for i := 0; i < len(s.order); i++ { - if s.order[i] == id { - return i, true - } - } - return 0, false -} diff --git a/vendor/github.com/aws/smithy-go/middleware/stack.go b/vendor/github.com/aws/smithy-go/middleware/stack.go deleted file mode 100644 index 45ccb5b93c..0000000000 --- a/vendor/github.com/aws/smithy-go/middleware/stack.go +++ /dev/null @@ -1,209 +0,0 @@ -package middleware - -import ( - "context" - "io" - "strings" -) - -// Stack provides protocol and transport agnostic set of middleware split into -// distinct steps. Steps have specific transitions between them, that are -// managed by the individual step. -// -// Steps are composed as middleware around the underlying handler in the -// following order: -// -// Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler -// -// Any middleware within the chain may choose to stop and return an error or -// response. Since the middleware decorate the handler like a call stack, each -// middleware will receive the result of the next middleware in the chain. -// Middleware that does not need to react to an input, or result must forward -// along the input down the chain, or return the result back up the chain. -// -// Initialize <- Serialize -> Build -> Finalize <- Deserialize <- Handler -type Stack struct { - // Initialize prepares the input, and sets any default parameters as - // needed, (e.g. idempotency token, and presigned URLs). - // - // Takes Input Parameters, and returns result or error. - // - // Receives result or error from Serialize step. - Initialize *InitializeStep - - // Serialize serializes the prepared input into a data structure that can be consumed - // by the target transport's message, (e.g. REST-JSON serialization) - // - // Converts Input Parameters into a Request, and returns the result or error. - // - // Receives result or error from Build step. - Serialize *SerializeStep - - // Build adds additional metadata to the serialized transport message - // (e.g. HTTP's Content-Length header, or body checksum). Decorations and - // modifications to the message should be copied to all message attempts. 
- // - // Takes Request, and returns result or error. - // - // Receives result or error from Finalize step. - Build *BuildStep - - // Finalize performs final preparations needed before sending the message. The - // message should already be complete by this stage, and is only alternated - // to meet the expectations of the recipient (e.g. Retry and AWS SigV4 - // request signing) - // - // Takes Request, and returns result or error. - // - // Receives result or error from Deserialize step. - Finalize *FinalizeStep - - // Deserialize reacts to the handler's response returned by the recipient of the request - // message. Deserializes the response into a structured type or error above - // stacks can react to. - // - // Should only forward Request to underlying handler. - // - // Takes Request, and returns result or error. - // - // Receives raw response, or error from underlying handler. - Deserialize *DeserializeStep - - id string -} - -// NewStack returns an initialize empty stack. -func NewStack(id string, newRequestFn func() interface{}) *Stack { - return &Stack{ - id: id, - Initialize: NewInitializeStep(), - Serialize: NewSerializeStep(newRequestFn), - Build: NewBuildStep(), - Finalize: NewFinalizeStep(), - Deserialize: NewDeserializeStep(), - } -} - -// ID returns the unique ID for the stack as a middleware. -func (s *Stack) ID() string { return s.id } - -// HandleMiddleware invokes the middleware stack decorating the next handler. -// Each step of stack will be invoked in order before calling the next step. -// With the next handler call last. -// -// The input value must be the input parameters of the operation being -// performed. -// -// Will return the result of the operation, or error. -func (s *Stack) HandleMiddleware(ctx context.Context, input interface{}, next Handler) ( - output interface{}, metadata Metadata, err error, -) { - h := DecorateHandler(next, - s.Initialize, - s.Serialize, - s.Build, - s.Finalize, - s.Deserialize, - ) - - return h.Handle(ctx, input) -} - -// List returns a list of all middleware in the stack by step. -func (s *Stack) List() []string { - var l []string - l = append(l, s.id) - - l = append(l, s.Initialize.ID()) - l = append(l, s.Initialize.List()...) - - l = append(l, s.Serialize.ID()) - l = append(l, s.Serialize.List()...) - - l = append(l, s.Build.ID()) - l = append(l, s.Build.List()...) - - l = append(l, s.Finalize.ID()) - l = append(l, s.Finalize.List()...) - - l = append(l, s.Deserialize.ID()) - l = append(l, s.Deserialize.List()...) 
- - return l -} - -func (s *Stack) String() string { - var b strings.Builder - - w := &indentWriter{w: &b} - - w.WriteLine(s.id) - w.Push() - - writeStepItems(w, s.Initialize) - writeStepItems(w, s.Serialize) - writeStepItems(w, s.Build) - writeStepItems(w, s.Finalize) - writeStepItems(w, s.Deserialize) - - return b.String() -} - -type stackStepper interface { - ID() string - List() []string -} - -func writeStepItems(w *indentWriter, s stackStepper) { - type lister interface { - List() []string - } - - w.WriteLine(s.ID()) - w.Push() - - defer w.Pop() - - // ignore stack to prevent circular iterations - if _, ok := s.(*Stack); ok { - return - } - - for _, id := range s.List() { - w.WriteLine(id) - } -} - -type stringWriter interface { - io.Writer - WriteString(string) (int, error) - WriteRune(rune) (int, error) -} - -type indentWriter struct { - w stringWriter - depth int -} - -const indentDepth = "\t\t\t\t\t\t\t\t\t\t" - -func (w *indentWriter) Push() { - w.depth++ -} - -func (w *indentWriter) Pop() { - w.depth-- - if w.depth < 0 { - w.depth = 0 - } -} - -func (w *indentWriter) WriteLine(v string) { - w.w.WriteString(indentDepth[:w.depth]) - - v = strings.ReplaceAll(v, "\n", "\\n") - v = strings.ReplaceAll(v, "\r", "\\r") - - w.w.WriteString(v) - w.w.WriteRune('\n') -} diff --git a/vendor/github.com/aws/smithy-go/middleware/stack_values.go b/vendor/github.com/aws/smithy-go/middleware/stack_values.go deleted file mode 100644 index ef96009ba1..0000000000 --- a/vendor/github.com/aws/smithy-go/middleware/stack_values.go +++ /dev/null @@ -1,100 +0,0 @@ -package middleware - -import ( - "context" - "reflect" - "strings" -) - -// WithStackValue adds a key value pair to the context that is intended to be -// scoped to a stack. Use ClearStackValues to get a new context with all stack -// values cleared. -func WithStackValue(ctx context.Context, key, value interface{}) context.Context { - md, _ := ctx.Value(stackValuesKey{}).(*stackValues) - - md = withStackValue(md, key, value) - return context.WithValue(ctx, stackValuesKey{}, md) -} - -// ClearStackValues returns a context without any stack values. -func ClearStackValues(ctx context.Context) context.Context { - return context.WithValue(ctx, stackValuesKey{}, nil) -} - -// GetStackValues returns the value pointed to by the key within the stack -// values, if it is present. 
-func GetStackValue(ctx context.Context, key interface{}) interface{} { - md, _ := ctx.Value(stackValuesKey{}).(*stackValues) - if md == nil { - return nil - } - - return md.Value(key) -} - -type stackValuesKey struct{} - -type stackValues struct { - key interface{} - value interface{} - parent *stackValues -} - -func withStackValue(parent *stackValues, key, value interface{}) *stackValues { - if key == nil { - panic("nil key") - } - if !reflect.TypeOf(key).Comparable() { - panic("key is not comparable") - } - return &stackValues{key: key, value: value, parent: parent} -} - -func (m *stackValues) Value(key interface{}) interface{} { - if key == m.key { - return m.value - } - - if m.parent == nil { - return nil - } - - return m.parent.Value(key) -} - -func (c *stackValues) String() string { - var str strings.Builder - - cc := c - for cc == nil { - str.WriteString("(" + - reflect.TypeOf(c.key).String() + - ": " + - stringify(cc.value) + - ")") - if cc.parent != nil { - str.WriteString(" -> ") - } - cc = cc.parent - } - str.WriteRune('}') - - return str.String() -} - -type stringer interface { - String() string -} - -// stringify tries a bit to stringify v, without using fmt, since we don't -// want context depending on the unicode tables. This is only used by -// *valueCtx.String(). -func stringify(v interface{}) string { - switch s := v.(type) { - case stringer: - return s.String() - case string: - return s - } - return "" -} diff --git a/vendor/github.com/aws/smithy-go/middleware/step_build.go b/vendor/github.com/aws/smithy-go/middleware/step_build.go deleted file mode 100644 index 7e1d94caee..0000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_build.go +++ /dev/null @@ -1,211 +0,0 @@ -package middleware - -import ( - "context" -) - -// BuildInput provides the input parameters for the BuildMiddleware to consume. -// BuildMiddleware may modify the Request value before forwarding the input -// along to the next BuildHandler. -type BuildInput struct { - Request interface{} -} - -// BuildOutput provides the result returned by the next BuildHandler. -type BuildOutput struct { - Result interface{} -} - -// BuildHandler provides the interface for the next handler the -// BuildMiddleware will call in the middleware chain. -type BuildHandler interface { - HandleBuild(ctx context.Context, in BuildInput) ( - out BuildOutput, metadata Metadata, err error, - ) -} - -// BuildMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next BuildHandler for further -// processing. -type BuildMiddleware interface { - // Unique ID for the middleware in theBuildStep. The step does not allow - // duplicate IDs. - ID() string - - // Invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. - HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( - out BuildOutput, metadata Metadata, err error, - ) -} - -// BuildMiddlewareFunc returns a BuildMiddleware with the unique ID provided, -// and the func to be invoked. -func BuildMiddlewareFunc(id string, fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)) BuildMiddleware { - return buildMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type buildMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. 
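The stack-values helpers above chain key/value pairs under a single context key, so per-operation state can be wiped with one ClearStackValues call instead of rebuilding the context. A usage sketch (the attempt key is illustrative; canonical import path assumed):

package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

type attemptKey struct{}

func main() {
	ctx := context.Background()

	ctx = middleware.WithStackValue(ctx, attemptKey{}, 3)
	fmt.Println(middleware.GetStackValue(ctx, attemptKey{})) // 3

	// One call drops every stack value at once.
	ctx = middleware.ClearStackValues(ctx)
	fmt.Println(middleware.GetStackValue(ctx, attemptKey{})) // <nil>
}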
- fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error) -} - -// ID returns the unique ID for the middleware. -func (s buildMiddlewareFunc) ID() string { return s.id } - -// HandleBuild invokes the middleware Fn. -func (s buildMiddlewareFunc) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( - out BuildOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ BuildMiddleware = (buildMiddlewareFunc{}) - -// BuildStep provides the ordered grouping of BuildMiddleware to be invoked on -// a handler. -type BuildStep struct { - ids *orderedIDs -} - -// NewBuildStep returns a BuildStep ready to have middleware for -// initialization added to it. -func NewBuildStep() *BuildStep { - return &BuildStep{ - ids: newOrderedIDs(), - } -} - -var _ Middleware = (*BuildStep)(nil) - -// ID returns the unique name of the step as a middleware. -func (s *BuildStep) ID() string { - return "Build stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *BuildStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h BuildHandler = buildWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedBuildHandler{ - Next: h, - With: order[i].(BuildMiddleware), - } - } - - sIn := BuildInput{ - Request: in, - } - - res, metadata, err := h.HandleBuild(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *BuildStep) Get(id string) (BuildMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(BuildMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *BuildStep) Add(m BuildMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware id. -// Returns an error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *BuildStep) Insert(m BuildMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or an error if the middleware to be removed -// doesn't exist. -func (s *BuildStep) Swap(id string, m BuildMiddleware) (BuildMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(BuildMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *BuildStep) Remove(id string) (BuildMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(BuildMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *BuildStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. 
-func (s *BuildStep) Clear() { - s.ids.Clear() -} - -type buildWrapHandler struct { - Next Handler -} - -var _ BuildHandler = (*buildWrapHandler)(nil) - -// Implements BuildHandler, converts types and delegates to underlying -// generic handler. -func (w buildWrapHandler) HandleBuild(ctx context.Context, in BuildInput) ( - out BuildOutput, metadata Metadata, err error, -) { - res, metadata, err := w.Next.Handle(ctx, in.Request) - return BuildOutput{ - Result: res, - }, metadata, err -} - -type decoratedBuildHandler struct { - Next BuildHandler - With BuildMiddleware -} - -var _ BuildHandler = (*decoratedBuildHandler)(nil) - -func (h decoratedBuildHandler) HandleBuild(ctx context.Context, in BuildInput) ( - out BuildOutput, metadata Metadata, err error, -) { - return h.With.HandleBuild(ctx, in, h.Next) -} - -// BuildHandlerFunc provides a wrapper around a function to be used as a build middleware handler. -type BuildHandlerFunc func(context.Context, BuildInput) (BuildOutput, Metadata, error) - -// HandleBuild invokes the wrapped function with the provided arguments. -func (b BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) { - return b(ctx, in) -} - -var _ BuildHandler = BuildHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go deleted file mode 100644 index 4486072157..0000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go +++ /dev/null @@ -1,217 +0,0 @@ -package middleware - -import ( - "context" -) - -// DeserializeInput provides the input parameters for the DeserializeInput to -// consume. DeserializeMiddleware should not modify the Request, and instead -// forward it along to the next DeserializeHandler. -type DeserializeInput struct { - Request interface{} -} - -// DeserializeOutput provides the result returned by the next -// DeserializeHandler. The DeserializeMiddleware should deserialize the -// RawResponse into a Result that can be consumed by middleware higher up in -// the stack. -type DeserializeOutput struct { - RawResponse interface{} - Result interface{} -} - -// DeserializeHandler provides the interface for the next handler the -// DeserializeMiddleware will call in the middleware chain. -type DeserializeHandler interface { - HandleDeserialize(ctx context.Context, in DeserializeInput) ( - out DeserializeOutput, metadata Metadata, err error, - ) -} - -// DeserializeMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next DeserializeHandler for further -// processing. -type DeserializeMiddleware interface { - // ID returns a unique ID for the middleware in the DeserializeStep. The step does not - // allow duplicate IDs. - ID() string - - // HandleDeserialize invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. - HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( - out DeserializeOutput, metadata Metadata, err error, - ) -} - -// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID -// provided, and the func to be invoked. 
-func DeserializeMiddlewareFunc(id string, fn func(context.Context, DeserializeInput, DeserializeHandler) (DeserializeOutput, Metadata, error)) DeserializeMiddleware { - return deserializeMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type deserializeMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, DeserializeInput, DeserializeHandler) ( - DeserializeOutput, Metadata, error, - ) -} - -// ID returns the unique ID for the middleware. -func (s deserializeMiddlewareFunc) ID() string { return s.id } - -// HandleDeserialize invokes the middleware Fn. -func (s deserializeMiddlewareFunc) HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( - out DeserializeOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ DeserializeMiddleware = (deserializeMiddlewareFunc{}) - -// DeserializeStep provides the ordered grouping of DeserializeMiddleware to be -// invoked on a handler. -type DeserializeStep struct { - ids *orderedIDs -} - -// NewDeserializeStep returns a DeserializeStep ready to have middleware for -// initialization added to it. -func NewDeserializeStep() *DeserializeStep { - return &DeserializeStep{ - ids: newOrderedIDs(), - } -} - -var _ Middleware = (*DeserializeStep)(nil) - -// ID returns the unique ID of the step as a middleware. -func (s *DeserializeStep) ID() string { - return "Deserialize stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *DeserializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h DeserializeHandler = deserializeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedDeserializeHandler{ - Next: h, - With: order[i].(DeserializeMiddleware), - } - } - - sIn := DeserializeInput{ - Request: in, - } - - res, metadata, err := h.HandleDeserialize(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *DeserializeStep) Get(id string) (DeserializeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(DeserializeMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *DeserializeStep) Add(m DeserializeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware ID. -// Returns error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *DeserializeStep) Insert(m DeserializeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or error if the middleware to be removed -// doesn't exist. -func (s *DeserializeStep) Swap(id string, m DeserializeMiddleware) (DeserializeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(DeserializeMiddleware), nil -} - -// Remove removes the middleware by id. 
Returns error if the middleware -// doesn't exist. -func (s *DeserializeStep) Remove(id string) (DeserializeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(DeserializeMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *DeserializeStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *DeserializeStep) Clear() { - s.ids.Clear() -} - -type deserializeWrapHandler struct { - Next Handler -} - -var _ DeserializeHandler = (*deserializeWrapHandler)(nil) - -// HandleDeserialize implements DeserializeHandler, converts types and delegates to underlying -// generic handler. -func (w deserializeWrapHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) ( - out DeserializeOutput, metadata Metadata, err error, -) { - resp, metadata, err := w.Next.Handle(ctx, in.Request) - return DeserializeOutput{ - RawResponse: resp, - }, metadata, err -} - -type decoratedDeserializeHandler struct { - Next DeserializeHandler - With DeserializeMiddleware -} - -var _ DeserializeHandler = (*decoratedDeserializeHandler)(nil) - -func (h decoratedDeserializeHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) ( - out DeserializeOutput, metadata Metadata, err error, -) { - return h.With.HandleDeserialize(ctx, in, h.Next) -} - -// DeserializeHandlerFunc provides a wrapper around a function to be used as a deserialize middleware handler. -type DeserializeHandlerFunc func(context.Context, DeserializeInput) (DeserializeOutput, Metadata, error) - -// HandleDeserialize invokes the wrapped function with the given arguments. -func (d DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) { - return d(ctx, in) -} - -var _ DeserializeHandler = DeserializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go deleted file mode 100644 index 065e3885de..0000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go +++ /dev/null @@ -1,211 +0,0 @@ -package middleware - -import "context" - -// FinalizeInput provides the input parameters for the FinalizeMiddleware to -// consume. FinalizeMiddleware may modify the Request value before forwarding -// the FinalizeInput along to the next next FinalizeHandler. -type FinalizeInput struct { - Request interface{} -} - -// FinalizeOutput provides the result returned by the next FinalizeHandler. -type FinalizeOutput struct { - Result interface{} -} - -// FinalizeHandler provides the interface for the next handler the -// FinalizeMiddleware will call in the middleware chain. -type FinalizeHandler interface { - HandleFinalize(ctx context.Context, in FinalizeInput) ( - out FinalizeOutput, metadata Metadata, err error, - ) -} - -// FinalizeMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next FinalizeHandler for further -// processing. -type FinalizeMiddleware interface { - // ID returns a unique ID for the middleware in the FinalizeStep. The step does not - // allow duplicate IDs. - ID() string - - // HandleFinalize invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. 
- HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( - out FinalizeOutput, metadata Metadata, err error, - ) -} - -// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID -// provided, and the func to be invoked. -func FinalizeMiddlewareFunc(id string, fn func(context.Context, FinalizeInput, FinalizeHandler) (FinalizeOutput, Metadata, error)) FinalizeMiddleware { - return finalizeMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type finalizeMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, FinalizeInput, FinalizeHandler) ( - FinalizeOutput, Metadata, error, - ) -} - -// ID returns the unique ID for the middleware. -func (s finalizeMiddlewareFunc) ID() string { return s.id } - -// HandleFinalize invokes the middleware Fn. -func (s finalizeMiddlewareFunc) HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( - out FinalizeOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ FinalizeMiddleware = (finalizeMiddlewareFunc{}) - -// FinalizeStep provides the ordered grouping of FinalizeMiddleware to be -// invoked on a handler. -type FinalizeStep struct { - ids *orderedIDs -} - -// NewFinalizeStep returns a FinalizeStep ready to have middleware for -// initialization added to it. -func NewFinalizeStep() *FinalizeStep { - return &FinalizeStep{ - ids: newOrderedIDs(), - } -} - -var _ Middleware = (*FinalizeStep)(nil) - -// ID returns the unique id of the step as a middleware. -func (s *FinalizeStep) ID() string { - return "Finalize stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *FinalizeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h FinalizeHandler = finalizeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedFinalizeHandler{ - Next: h, - With: order[i].(FinalizeMiddleware), - } - } - - sIn := FinalizeInput{ - Request: in, - } - - res, metadata, err := h.HandleFinalize(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *FinalizeStep) Get(id string) (FinalizeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(FinalizeMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *FinalizeStep) Add(m FinalizeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware ID. -// Returns error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *FinalizeStep) Insert(m FinalizeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or error if the middleware to be removed -// doesn't exist. 
-func (s *FinalizeStep) Swap(id string, m FinalizeMiddleware) (FinalizeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(FinalizeMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *FinalizeStep) Remove(id string) (FinalizeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(FinalizeMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *FinalizeStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *FinalizeStep) Clear() { - s.ids.Clear() -} - -type finalizeWrapHandler struct { - Next Handler -} - -var _ FinalizeHandler = (*finalizeWrapHandler)(nil) - -// HandleFinalize implements FinalizeHandler, converts types and delegates to underlying -// generic handler. -func (w finalizeWrapHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( - out FinalizeOutput, metadata Metadata, err error, -) { - res, metadata, err := w.Next.Handle(ctx, in.Request) - return FinalizeOutput{ - Result: res, - }, metadata, err -} - -type decoratedFinalizeHandler struct { - Next FinalizeHandler - With FinalizeMiddleware -} - -var _ FinalizeHandler = (*decoratedFinalizeHandler)(nil) - -func (h decoratedFinalizeHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( - out FinalizeOutput, metadata Metadata, err error, -) { - return h.With.HandleFinalize(ctx, in, h.Next) -} - -// FinalizeHandlerFunc provides a wrapper around a function to be used as a finalize middleware handler. -type FinalizeHandlerFunc func(context.Context, FinalizeInput) (FinalizeOutput, Metadata, error) - -// HandleFinalize invokes the wrapped function with the given arguments. -func (f FinalizeHandlerFunc) HandleFinalize(ctx context.Context, in FinalizeInput) (FinalizeOutput, Metadata, error) { - return f(ctx, in) -} - -var _ FinalizeHandler = FinalizeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go deleted file mode 100644 index fe359144d2..0000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go +++ /dev/null @@ -1,211 +0,0 @@ -package middleware - -import "context" - -// InitializeInput wraps the input parameters for the InitializeMiddlewares to -// consume. InitializeMiddleware may modify the parameter value before -// forwarding it along to the next InitializeHandler. -type InitializeInput struct { - Parameters interface{} -} - -// InitializeOutput provides the result returned by the next InitializeHandler. -type InitializeOutput struct { - Result interface{} -} - -// InitializeHandler provides the interface for the next handler the -// InitializeMiddleware will call in the middleware chain. -type InitializeHandler interface { - HandleInitialize(ctx context.Context, in InitializeInput) ( - out InitializeOutput, metadata Metadata, err error, - ) -} - -// InitializeMiddleware provides the interface for middleware specific to the -// initialize step. Delegates to the next InitializeHandler for further -// processing. -type InitializeMiddleware interface { - // ID returns a unique ID for the middleware in the InitializeStep. The step does not - // allow duplicate IDs. 
- ID() string - - // HandleInitialize invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. - HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( - out InitializeOutput, metadata Metadata, err error, - ) -} - -// InitializeMiddlewareFunc returns a InitializeMiddleware with the unique ID provided, -// and the func to be invoked. -func InitializeMiddlewareFunc(id string, fn func(context.Context, InitializeInput, InitializeHandler) (InitializeOutput, Metadata, error)) InitializeMiddleware { - return initializeMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type initializeMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, InitializeInput, InitializeHandler) ( - InitializeOutput, Metadata, error, - ) -} - -// ID returns the unique ID for the middleware. -func (s initializeMiddlewareFunc) ID() string { return s.id } - -// HandleInitialize invokes the middleware Fn. -func (s initializeMiddlewareFunc) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( - out InitializeOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ InitializeMiddleware = (initializeMiddlewareFunc{}) - -// InitializeStep provides the ordered grouping of InitializeMiddleware to be -// invoked on a handler. -type InitializeStep struct { - ids *orderedIDs -} - -// NewInitializeStep returns an InitializeStep ready to have middleware for -// initialization added to it. -func NewInitializeStep() *InitializeStep { - return &InitializeStep{ - ids: newOrderedIDs(), - } -} - -var _ Middleware = (*InitializeStep)(nil) - -// ID returns the unique ID of the step as a middleware. -func (s *InitializeStep) ID() string { - return "Initialize stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *InitializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h InitializeHandler = initializeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedInitializeHandler{ - Next: h, - With: order[i].(InitializeMiddleware), - } - } - - sIn := InitializeInput{ - Parameters: in, - } - - res, metadata, err := h.HandleInitialize(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *InitializeStep) Get(id string) (InitializeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(InitializeMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *InitializeStep) Add(m InitializeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware ID. -// Returns error if the original middleware does not exist, or the middleware -// being added already exists. 
-func (s *InitializeStep) Insert(m InitializeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or error if the middleware to be removed -// doesn't exist. -func (s *InitializeStep) Swap(id string, m InitializeMiddleware) (InitializeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(InitializeMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *InitializeStep) Remove(id string) (InitializeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(InitializeMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *InitializeStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *InitializeStep) Clear() { - s.ids.Clear() -} - -type initializeWrapHandler struct { - Next Handler -} - -var _ InitializeHandler = (*initializeWrapHandler)(nil) - -// HandleInitialize implements InitializeHandler, converts types and delegates to underlying -// generic handler. -func (w initializeWrapHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( - out InitializeOutput, metadata Metadata, err error, -) { - res, metadata, err := w.Next.Handle(ctx, in.Parameters) - return InitializeOutput{ - Result: res, - }, metadata, err -} - -type decoratedInitializeHandler struct { - Next InitializeHandler - With InitializeMiddleware -} - -var _ InitializeHandler = (*decoratedInitializeHandler)(nil) - -func (h decoratedInitializeHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( - out InitializeOutput, metadata Metadata, err error, -) { - return h.With.HandleInitialize(ctx, in, h.Next) -} - -// InitializeHandlerFunc provides a wrapper around a function to be used as an initialize middleware handler. -type InitializeHandlerFunc func(context.Context, InitializeInput) (InitializeOutput, Metadata, error) - -// HandleInitialize calls the wrapped function with the provided arguments. -func (i InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) { - return i(ctx, in) -} - -var _ InitializeHandler = InitializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go deleted file mode 100644 index 114bafcede..0000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go +++ /dev/null @@ -1,219 +0,0 @@ -package middleware - -import "context" - -// SerializeInput provides the input parameters for the SerializeMiddleware to -// consume. SerializeMiddleware may modify the Request value before forwarding -// SerializeInput along to the next SerializeHandler. The Parameters member -// should not be modified by SerializeMiddleware, InitializeMiddleware should -// be responsible for modifying the provided Parameter value. -type SerializeInput struct { - Parameters interface{} - Request interface{} -} - -// SerializeOutput provides the result returned by the next SerializeHandler. -type SerializeOutput struct { - Result interface{} -} - -// SerializeHandler provides the interface for the next handler the -// SerializeMiddleware will call in the middleware chain. 
-type SerializeHandler interface { - HandleSerialize(ctx context.Context, in SerializeInput) ( - out SerializeOutput, metadata Metadata, err error, - ) -} - -// SerializeMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next SerializeHandler for further -// processing. -type SerializeMiddleware interface { - // ID returns a unique ID for the middleware in the SerializeStep. The step does not - // allow duplicate IDs. - ID() string - - // HandleSerialize invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. - HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( - out SerializeOutput, metadata Metadata, err error, - ) -} - -// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID -// provided, and the func to be invoked. -func SerializeMiddlewareFunc(id string, fn func(context.Context, SerializeInput, SerializeHandler) (SerializeOutput, Metadata, error)) SerializeMiddleware { - return serializeMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type serializeMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, SerializeInput, SerializeHandler) ( - SerializeOutput, Metadata, error, - ) -} - -// ID returns the unique ID for the middleware. -func (s serializeMiddlewareFunc) ID() string { return s.id } - -// HandleSerialize invokes the middleware Fn. -func (s serializeMiddlewareFunc) HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( - out SerializeOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ SerializeMiddleware = (serializeMiddlewareFunc{}) - -// SerializeStep provides the ordered grouping of SerializeMiddleware to be -// invoked on a handler. -type SerializeStep struct { - newRequest func() interface{} - ids *orderedIDs -} - -// NewSerializeStep returns a SerializeStep ready to have middleware for -// initialization added to it. The newRequest func parameter is used to -// initialize the transport specific request for the stack SerializeStep to -// serialize the input parameters into. -func NewSerializeStep(newRequest func() interface{}) *SerializeStep { - return &SerializeStep{ - ids: newOrderedIDs(), - newRequest: newRequest, - } -} - -var _ Middleware = (*SerializeStep)(nil) - -// ID returns the unique ID of the step as a middleware. -func (s *SerializeStep) ID() string { - return "Serialize stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *SerializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h SerializeHandler = serializeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedSerializeHandler{ - Next: h, - With: order[i].(SerializeMiddleware), - } - } - - sIn := SerializeInput{ - Parameters: in, - Request: s.newRequest(), - } - - res, metadata, err := h.HandleSerialize(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. 
-func (s *SerializeStep) Get(id string) (SerializeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(SerializeMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *SerializeStep) Add(m SerializeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware ID. -// Returns error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *SerializeStep) Insert(m SerializeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or error if the middleware to be removed -// doesn't exist. -func (s *SerializeStep) Swap(id string, m SerializeMiddleware) (SerializeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(SerializeMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *SerializeStep) Remove(id string) (SerializeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(SerializeMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *SerializeStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *SerializeStep) Clear() { - s.ids.Clear() -} - -type serializeWrapHandler struct { - Next Handler -} - -var _ SerializeHandler = (*serializeWrapHandler)(nil) - -// Implements SerializeHandler, converts types and delegates to underlying -// generic handler. -func (w serializeWrapHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( - out SerializeOutput, metadata Metadata, err error, -) { - res, metadata, err := w.Next.Handle(ctx, in.Request) - return SerializeOutput{ - Result: res, - }, metadata, err -} - -type decoratedSerializeHandler struct { - Next SerializeHandler - With SerializeMiddleware -} - -var _ SerializeHandler = (*decoratedSerializeHandler)(nil) - -func (h decoratedSerializeHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( - out SerializeOutput, metadata Metadata, err error, -) { - return h.With.HandleSerialize(ctx, in, h.Next) -} - -// SerializeHandlerFunc provides a wrapper around a function to be used as a serialize middleware handler. -type SerializeHandlerFunc func(context.Context, SerializeInput) (SerializeOutput, Metadata, error) - -// HandleSerialize calls the wrapped function with the provided arguments. 
-func (s SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) { - return s(ctx, in) -} - -var _ SerializeHandler = SerializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml deleted file mode 100644 index 9d94b7cbd0..0000000000 --- a/vendor/github.com/aws/smithy-go/modman.toml +++ /dev/null @@ -1,10 +0,0 @@ -[dependencies] - "github.com/jmespath/go-jmespath" = "v0.4.0" - -[modules] - - [modules.codegen] - no_tag = true - - [modules."codegen/smithy-go-codegen/build/test-generated/go/internal/testmodule"] - no_tag = true diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go deleted file mode 100644 index 004d78f213..0000000000 --- a/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go +++ /dev/null @@ -1,30 +0,0 @@ -package requestcompression - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" -) - -func gzipCompress(input io.Reader) ([]byte, error) { - var b bytes.Buffer - w, err := gzip.NewWriterLevel(&b, gzip.DefaultCompression) - if err != nil { - return nil, fmt.Errorf("failed to create gzip writer, %v", err) - } - - inBytes, err := io.ReadAll(input) - if err != nil { - return nil, fmt.Errorf("failed read payload to compress, %v", err) - } - - if _, err = w.Write(inBytes); err != nil { - return nil, fmt.Errorf("failed to write payload to be compressed, %v", err) - } - if err = w.Close(); err != nil { - return nil, fmt.Errorf("failed to flush payload being compressed, %v", err) - } - - return b.Bytes(), nil -} diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go deleted file mode 100644 index 06c16afc11..0000000000 --- a/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go +++ /dev/null @@ -1,52 +0,0 @@ -package requestcompression - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "net/http" -) - -const captureUncompressedRequestID = "CaptureUncompressedRequest" - -// AddCaptureUncompressedRequestMiddleware captures http request before compress encoding for check -func AddCaptureUncompressedRequestMiddleware(stack *middleware.Stack, buf *bytes.Buffer) error { - return stack.Serialize.Insert(&captureUncompressedRequestMiddleware{ - buf: buf, - }, "RequestCompression", middleware.Before) -} - -type captureUncompressedRequestMiddleware struct { - req *http.Request - buf *bytes.Buffer - bytes []byte -} - -// ID returns id of the captureUncompressedRequestMiddleware -func (*captureUncompressedRequestMiddleware) ID() string { - return captureUncompressedRequestID -} - -// HandleSerialize captures request payload before it is compressed by request compression middleware -func (m *captureUncompressedRequestMiddleware) HandleSerialize(ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, -) ( - output middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := input.Request.(*smithyhttp.Request) - if !ok { - return output, metadata, fmt.Errorf("error when retrieving http request") - } - - _, err = 
io.Copy(m.buf, request.GetStream()) - if err != nil { - return output, metadata, fmt.Errorf("error when copying http request stream: %q", err) - } - if err = request.RewindStream(); err != nil { - return output, metadata, fmt.Errorf("error when rewinding request stream: %q", err) - } - - return next.HandleSerialize(ctx, input) -} diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go deleted file mode 100644 index 7c41476039..0000000000 --- a/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go +++ /dev/null @@ -1,103 +0,0 @@ -// Package requestcompression implements runtime support for smithy-modeled -// request compression. -// -// This package is designated as private and is intended for use only by the -// smithy client runtime. The exported API therein is not considered stable and -// is subject to breaking changes without notice. -package requestcompression - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/transport/http" - "io" -) - -const MaxRequestMinCompressSizeBytes = 10485760 - -// Enumeration values for supported compress Algorithms. -const ( - GZIP = "gzip" -) - -type compressFunc func(io.Reader) ([]byte, error) - -var allowedAlgorithms = map[string]compressFunc{ - GZIP: gzipCompress, -} - -// AddRequestCompression add requestCompression middleware to op stack -func AddRequestCompression(stack *middleware.Stack, disabled bool, minBytes int64, algorithms []string) error { - return stack.Serialize.Add(&requestCompression{ - disableRequestCompression: disabled, - requestMinCompressSizeBytes: minBytes, - compressAlgorithms: algorithms, - }, middleware.After) -} - -type requestCompression struct { - disableRequestCompression bool - requestMinCompressSizeBytes int64 - compressAlgorithms []string -} - -// ID returns the ID of the middleware -func (m requestCompression) ID() string { - return "RequestCompression" -} - -// HandleSerialize gzip compress the request's stream/body if enabled by config fields -func (m requestCompression) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if m.disableRequestCompression { - return next.HandleSerialize(ctx, in) - } - // still need to check requestMinCompressSizeBytes in case it is out of range after service client config - if m.requestMinCompressSizeBytes < 0 || m.requestMinCompressSizeBytes > MaxRequestMinCompressSizeBytes { - return out, metadata, fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", m.requestMinCompressSizeBytes) - } - - req, ok := in.Request.(*http.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - for _, algorithm := range m.compressAlgorithms { - compressFunc := allowedAlgorithms[algorithm] - if compressFunc != nil { - if stream := req.GetStream(); stream != nil { - size, found, err := req.StreamLength() - if err != nil { - return out, metadata, fmt.Errorf("error while finding request stream length, %v", err) - } else if !found || size < m.requestMinCompressSizeBytes { - return next.HandleSerialize(ctx, in) - } - - compressedBytes, err := compressFunc(stream) - if err != nil { - return out, metadata, fmt.Errorf("failed to compress 
request stream, %v", err) - } - - var newReq *http.Request - if newReq, err = req.SetStream(bytes.NewReader(compressedBytes)); err != nil { - return out, metadata, fmt.Errorf("failed to set request stream, %v", err) - } - *req = *newReq - - if val := req.Header.Get("Content-Encoding"); val != "" { - req.Header.Set("Content-Encoding", fmt.Sprintf("%s, %s", val, algorithm)) - } else { - req.Header.Set("Content-Encoding", algorithm) - } - } - break - } - } - - return next.HandleSerialize(ctx, in) -} diff --git a/vendor/github.com/aws/smithy-go/properties.go b/vendor/github.com/aws/smithy-go/properties.go deleted file mode 100644 index c9af66c0ea..0000000000 --- a/vendor/github.com/aws/smithy-go/properties.go +++ /dev/null @@ -1,62 +0,0 @@ -package smithy - -// PropertiesReader provides an interface for reading metadata from the -// underlying metadata container. -type PropertiesReader interface { - Get(key interface{}) interface{} -} - -// Properties provides storing and reading metadata values. Keys may be any -// comparable value type. Get and Set will panic if a key is not comparable. -// -// The zero value for a Properties instance is ready for reads/writes without -// any additional initialization. -type Properties struct { - values map[interface{}]interface{} -} - -// Get attempts to retrieve the value the key points to. Returns nil if the -// key was not found. -// -// Panics if key type is not comparable. -func (m *Properties) Get(key interface{}) interface{} { - m.lazyInit() - return m.values[key] -} - -// Set stores the value pointed to by the key. If a value already exists at -// that key it will be replaced with the new value. -// -// Panics if the key type is not comparable. -func (m *Properties) Set(key, value interface{}) { - m.lazyInit() - m.values[key] = value -} - -// Has returns whether the key exists in the metadata. -// -// Panics if the key type is not comparable. -func (m *Properties) Has(key interface{}) bool { - m.lazyInit() - _, ok := m.values[key] - return ok -} - -// SetAll accepts all of the given Properties into the receiver, overwriting -// any existing keys in the case of conflicts. -func (m *Properties) SetAll(other *Properties) { - if other.values == nil { - return - } - - m.lazyInit() - for k, v := range other.values { - m.values[k] = v - } -} - -func (m *Properties) lazyInit() { - if m.values == nil { - m.values = map[interface{}]interface{}{} - } -} diff --git a/vendor/github.com/aws/smithy-go/ptr/doc.go b/vendor/github.com/aws/smithy-go/ptr/doc.go deleted file mode 100644 index bc1f699616..0000000000 --- a/vendor/github.com/aws/smithy-go/ptr/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package ptr provides utilities for converting scalar literal type values to and from pointers inline. -package ptr - -//go:generate go run -tags codegen generate.go -//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go deleted file mode 100644 index a2845bb2c8..0000000000 --- a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go +++ /dev/null @@ -1,601 +0,0 @@ -// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. -package ptr - -import ( - "time" -) - -// ToBool returns bool value dereferenced if the passed -// in pointer was not nil. Returns a bool zero value if the -// pointer was nil. 
-func ToBool(p *bool) (v bool) { - if p == nil { - return v - } - - return *p -} - -// ToBoolSlice returns a slice of bool values, that are -// dereferenced if the passed in pointer was not nil. Returns a bool -// zero value if the pointer was nil. -func ToBoolSlice(vs []*bool) []bool { - ps := make([]bool, len(vs)) - for i, v := range vs { - ps[i] = ToBool(v) - } - - return ps -} - -// ToBoolMap returns a map of bool values, that are -// dereferenced if the passed in pointer was not nil. The bool -// zero value is used if the pointer was nil. -func ToBoolMap(vs map[string]*bool) map[string]bool { - ps := make(map[string]bool, len(vs)) - for k, v := range vs { - ps[k] = ToBool(v) - } - - return ps -} - -// ToByte returns byte value dereferenced if the passed -// in pointer was not nil. Returns a byte zero value if the -// pointer was nil. -func ToByte(p *byte) (v byte) { - if p == nil { - return v - } - - return *p -} - -// ToByteSlice returns a slice of byte values, that are -// dereferenced if the passed in pointer was not nil. Returns a byte -// zero value if the pointer was nil. -func ToByteSlice(vs []*byte) []byte { - ps := make([]byte, len(vs)) - for i, v := range vs { - ps[i] = ToByte(v) - } - - return ps -} - -// ToByteMap returns a map of byte values, that are -// dereferenced if the passed in pointer was not nil. The byte -// zero value is used if the pointer was nil. -func ToByteMap(vs map[string]*byte) map[string]byte { - ps := make(map[string]byte, len(vs)) - for k, v := range vs { - ps[k] = ToByte(v) - } - - return ps -} - -// ToString returns string value dereferenced if the passed -// in pointer was not nil. Returns a string zero value if the -// pointer was nil. -func ToString(p *string) (v string) { - if p == nil { - return v - } - - return *p -} - -// ToStringSlice returns a slice of string values, that are -// dereferenced if the passed in pointer was not nil. Returns a string -// zero value if the pointer was nil. -func ToStringSlice(vs []*string) []string { - ps := make([]string, len(vs)) - for i, v := range vs { - ps[i] = ToString(v) - } - - return ps -} - -// ToStringMap returns a map of string values, that are -// dereferenced if the passed in pointer was not nil. The string -// zero value is used if the pointer was nil. -func ToStringMap(vs map[string]*string) map[string]string { - ps := make(map[string]string, len(vs)) - for k, v := range vs { - ps[k] = ToString(v) - } - - return ps -} - -// ToInt returns int value dereferenced if the passed -// in pointer was not nil. Returns a int zero value if the -// pointer was nil. -func ToInt(p *int) (v int) { - if p == nil { - return v - } - - return *p -} - -// ToIntSlice returns a slice of int values, that are -// dereferenced if the passed in pointer was not nil. Returns a int -// zero value if the pointer was nil. -func ToIntSlice(vs []*int) []int { - ps := make([]int, len(vs)) - for i, v := range vs { - ps[i] = ToInt(v) - } - - return ps -} - -// ToIntMap returns a map of int values, that are -// dereferenced if the passed in pointer was not nil. The int -// zero value is used if the pointer was nil. -func ToIntMap(vs map[string]*int) map[string]int { - ps := make(map[string]int, len(vs)) - for k, v := range vs { - ps[k] = ToInt(v) - } - - return ps -} - -// ToInt8 returns int8 value dereferenced if the passed -// in pointer was not nil. Returns a int8 zero value if the -// pointer was nil. 
-func ToInt8(p *int8) (v int8) { - if p == nil { - return v - } - - return *p -} - -// ToInt8Slice returns a slice of int8 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int8 -// zero value if the pointer was nil. -func ToInt8Slice(vs []*int8) []int8 { - ps := make([]int8, len(vs)) - for i, v := range vs { - ps[i] = ToInt8(v) - } - - return ps -} - -// ToInt8Map returns a map of int8 values, that are -// dereferenced if the passed in pointer was not nil. The int8 -// zero value is used if the pointer was nil. -func ToInt8Map(vs map[string]*int8) map[string]int8 { - ps := make(map[string]int8, len(vs)) - for k, v := range vs { - ps[k] = ToInt8(v) - } - - return ps -} - -// ToInt16 returns int16 value dereferenced if the passed -// in pointer was not nil. Returns a int16 zero value if the -// pointer was nil. -func ToInt16(p *int16) (v int16) { - if p == nil { - return v - } - - return *p -} - -// ToInt16Slice returns a slice of int16 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int16 -// zero value if the pointer was nil. -func ToInt16Slice(vs []*int16) []int16 { - ps := make([]int16, len(vs)) - for i, v := range vs { - ps[i] = ToInt16(v) - } - - return ps -} - -// ToInt16Map returns a map of int16 values, that are -// dereferenced if the passed in pointer was not nil. The int16 -// zero value is used if the pointer was nil. -func ToInt16Map(vs map[string]*int16) map[string]int16 { - ps := make(map[string]int16, len(vs)) - for k, v := range vs { - ps[k] = ToInt16(v) - } - - return ps -} - -// ToInt32 returns int32 value dereferenced if the passed -// in pointer was not nil. Returns a int32 zero value if the -// pointer was nil. -func ToInt32(p *int32) (v int32) { - if p == nil { - return v - } - - return *p -} - -// ToInt32Slice returns a slice of int32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int32 -// zero value if the pointer was nil. -func ToInt32Slice(vs []*int32) []int32 { - ps := make([]int32, len(vs)) - for i, v := range vs { - ps[i] = ToInt32(v) - } - - return ps -} - -// ToInt32Map returns a map of int32 values, that are -// dereferenced if the passed in pointer was not nil. The int32 -// zero value is used if the pointer was nil. -func ToInt32Map(vs map[string]*int32) map[string]int32 { - ps := make(map[string]int32, len(vs)) - for k, v := range vs { - ps[k] = ToInt32(v) - } - - return ps -} - -// ToInt64 returns int64 value dereferenced if the passed -// in pointer was not nil. Returns a int64 zero value if the -// pointer was nil. -func ToInt64(p *int64) (v int64) { - if p == nil { - return v - } - - return *p -} - -// ToInt64Slice returns a slice of int64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int64 -// zero value if the pointer was nil. -func ToInt64Slice(vs []*int64) []int64 { - ps := make([]int64, len(vs)) - for i, v := range vs { - ps[i] = ToInt64(v) - } - - return ps -} - -// ToInt64Map returns a map of int64 values, that are -// dereferenced if the passed in pointer was not nil. The int64 -// zero value is used if the pointer was nil. -func ToInt64Map(vs map[string]*int64) map[string]int64 { - ps := make(map[string]int64, len(vs)) - for k, v := range vs { - ps[k] = ToInt64(v) - } - - return ps -} - -// ToUint returns uint value dereferenced if the passed -// in pointer was not nil. Returns a uint zero value if the -// pointer was nil. 
-func ToUint(p *uint) (v uint) { - if p == nil { - return v - } - - return *p -} - -// ToUintSlice returns a slice of uint values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint -// zero value if the pointer was nil. -func ToUintSlice(vs []*uint) []uint { - ps := make([]uint, len(vs)) - for i, v := range vs { - ps[i] = ToUint(v) - } - - return ps -} - -// ToUintMap returns a map of uint values, that are -// dereferenced if the passed in pointer was not nil. The uint -// zero value is used if the pointer was nil. -func ToUintMap(vs map[string]*uint) map[string]uint { - ps := make(map[string]uint, len(vs)) - for k, v := range vs { - ps[k] = ToUint(v) - } - - return ps -} - -// ToUint8 returns uint8 value dereferenced if the passed -// in pointer was not nil. Returns a uint8 zero value if the -// pointer was nil. -func ToUint8(p *uint8) (v uint8) { - if p == nil { - return v - } - - return *p -} - -// ToUint8Slice returns a slice of uint8 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint8 -// zero value if the pointer was nil. -func ToUint8Slice(vs []*uint8) []uint8 { - ps := make([]uint8, len(vs)) - for i, v := range vs { - ps[i] = ToUint8(v) - } - - return ps -} - -// ToUint8Map returns a map of uint8 values, that are -// dereferenced if the passed in pointer was not nil. The uint8 -// zero value is used if the pointer was nil. -func ToUint8Map(vs map[string]*uint8) map[string]uint8 { - ps := make(map[string]uint8, len(vs)) - for k, v := range vs { - ps[k] = ToUint8(v) - } - - return ps -} - -// ToUint16 returns uint16 value dereferenced if the passed -// in pointer was not nil. Returns a uint16 zero value if the -// pointer was nil. -func ToUint16(p *uint16) (v uint16) { - if p == nil { - return v - } - - return *p -} - -// ToUint16Slice returns a slice of uint16 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint16 -// zero value if the pointer was nil. -func ToUint16Slice(vs []*uint16) []uint16 { - ps := make([]uint16, len(vs)) - for i, v := range vs { - ps[i] = ToUint16(v) - } - - return ps -} - -// ToUint16Map returns a map of uint16 values, that are -// dereferenced if the passed in pointer was not nil. The uint16 -// zero value is used if the pointer was nil. -func ToUint16Map(vs map[string]*uint16) map[string]uint16 { - ps := make(map[string]uint16, len(vs)) - for k, v := range vs { - ps[k] = ToUint16(v) - } - - return ps -} - -// ToUint32 returns uint32 value dereferenced if the passed -// in pointer was not nil. Returns a uint32 zero value if the -// pointer was nil. -func ToUint32(p *uint32) (v uint32) { - if p == nil { - return v - } - - return *p -} - -// ToUint32Slice returns a slice of uint32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint32 -// zero value if the pointer was nil. -func ToUint32Slice(vs []*uint32) []uint32 { - ps := make([]uint32, len(vs)) - for i, v := range vs { - ps[i] = ToUint32(v) - } - - return ps -} - -// ToUint32Map returns a map of uint32 values, that are -// dereferenced if the passed in pointer was not nil. The uint32 -// zero value is used if the pointer was nil. -func ToUint32Map(vs map[string]*uint32) map[string]uint32 { - ps := make(map[string]uint32, len(vs)) - for k, v := range vs { - ps[k] = ToUint32(v) - } - - return ps -} - -// ToUint64 returns uint64 value dereferenced if the passed -// in pointer was not nil. Returns a uint64 zero value if the -// pointer was nil. 
-func ToUint64(p *uint64) (v uint64) { - if p == nil { - return v - } - - return *p -} - -// ToUint64Slice returns a slice of uint64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint64 -// zero value if the pointer was nil. -func ToUint64Slice(vs []*uint64) []uint64 { - ps := make([]uint64, len(vs)) - for i, v := range vs { - ps[i] = ToUint64(v) - } - - return ps -} - -// ToUint64Map returns a map of uint64 values, that are -// dereferenced if the passed in pointer was not nil. The uint64 -// zero value is used if the pointer was nil. -func ToUint64Map(vs map[string]*uint64) map[string]uint64 { - ps := make(map[string]uint64, len(vs)) - for k, v := range vs { - ps[k] = ToUint64(v) - } - - return ps -} - -// ToFloat32 returns float32 value dereferenced if the passed -// in pointer was not nil. Returns a float32 zero value if the -// pointer was nil. -func ToFloat32(p *float32) (v float32) { - if p == nil { - return v - } - - return *p -} - -// ToFloat32Slice returns a slice of float32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a float32 -// zero value if the pointer was nil. -func ToFloat32Slice(vs []*float32) []float32 { - ps := make([]float32, len(vs)) - for i, v := range vs { - ps[i] = ToFloat32(v) - } - - return ps -} - -// ToFloat32Map returns a map of float32 values, that are -// dereferenced if the passed in pointer was not nil. The float32 -// zero value is used if the pointer was nil. -func ToFloat32Map(vs map[string]*float32) map[string]float32 { - ps := make(map[string]float32, len(vs)) - for k, v := range vs { - ps[k] = ToFloat32(v) - } - - return ps -} - -// ToFloat64 returns float64 value dereferenced if the passed -// in pointer was not nil. Returns a float64 zero value if the -// pointer was nil. -func ToFloat64(p *float64) (v float64) { - if p == nil { - return v - } - - return *p -} - -// ToFloat64Slice returns a slice of float64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a float64 -// zero value if the pointer was nil. -func ToFloat64Slice(vs []*float64) []float64 { - ps := make([]float64, len(vs)) - for i, v := range vs { - ps[i] = ToFloat64(v) - } - - return ps -} - -// ToFloat64Map returns a map of float64 values, that are -// dereferenced if the passed in pointer was not nil. The float64 -// zero value is used if the pointer was nil. -func ToFloat64Map(vs map[string]*float64) map[string]float64 { - ps := make(map[string]float64, len(vs)) - for k, v := range vs { - ps[k] = ToFloat64(v) - } - - return ps -} - -// ToTime returns time.Time value dereferenced if the passed -// in pointer was not nil. Returns a time.Time zero value if the -// pointer was nil. -func ToTime(p *time.Time) (v time.Time) { - if p == nil { - return v - } - - return *p -} - -// ToTimeSlice returns a slice of time.Time values, that are -// dereferenced if the passed in pointer was not nil. Returns a time.Time -// zero value if the pointer was nil. -func ToTimeSlice(vs []*time.Time) []time.Time { - ps := make([]time.Time, len(vs)) - for i, v := range vs { - ps[i] = ToTime(v) - } - - return ps -} - -// ToTimeMap returns a map of time.Time values, that are -// dereferenced if the passed in pointer was not nil. The time.Time -// zero value is used if the pointer was nil. 
-func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { - ps := make(map[string]time.Time, len(vs)) - for k, v := range vs { - ps[k] = ToTime(v) - } - - return ps -} - -// ToDuration returns time.Duration value dereferenced if the passed -// in pointer was not nil. Returns a time.Duration zero value if the -// pointer was nil. -func ToDuration(p *time.Duration) (v time.Duration) { - if p == nil { - return v - } - - return *p -} - -// ToDurationSlice returns a slice of time.Duration values, that are -// dereferenced if the passed in pointer was not nil. Returns a time.Duration -// zero value if the pointer was nil. -func ToDurationSlice(vs []*time.Duration) []time.Duration { - ps := make([]time.Duration, len(vs)) - for i, v := range vs { - ps[i] = ToDuration(v) - } - - return ps -} - -// ToDurationMap returns a map of time.Duration values, that are -// dereferenced if the passed in pointer was not nil. The time.Duration -// zero value is used if the pointer was nil. -func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { - ps := make(map[string]time.Duration, len(vs)) - for k, v := range vs { - ps[k] = ToDuration(v) - } - - return ps -} diff --git a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go deleted file mode 100644 index 97f01011e7..0000000000 --- a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go +++ /dev/null @@ -1,83 +0,0 @@ -//go:build codegen -// +build codegen - -package ptr - -import "strings" - -func GetScalars() Scalars { - return Scalars{ - {Type: "bool"}, - {Type: "byte"}, - {Type: "string"}, - {Type: "int"}, - {Type: "int8"}, - {Type: "int16"}, - {Type: "int32"}, - {Type: "int64"}, - {Type: "uint"}, - {Type: "uint8"}, - {Type: "uint16"}, - {Type: "uint32"}, - {Type: "uint64"}, - {Type: "float32"}, - {Type: "float64"}, - {Type: "Time", Import: &Import{Path: "time"}}, - {Type: "Duration", Import: &Import{Path: "time"}}, - } -} - -// Import provides the import path and optional alias -type Import struct { - Path string - Alias string -} - -// Package returns the Go package name for the import. Returns alias if set. -func (i Import) Package() string { - if v := i.Alias; len(v) != 0 { - return v - } - - if v := i.Path; len(v) != 0 { - parts := strings.Split(v, "/") - pkg := parts[len(parts)-1] - return pkg - } - - return "" -} - -// Scalar provides the definition of a type to generate pointer utilities for. -type Scalar struct { - Type string - Import *Import -} - -// Name returns the exported function name for the type. -func (t Scalar) Name() string { - return strings.Title(t.Type) -} - -// Symbol returns the scalar's Go symbol with path if needed. -func (t Scalar) Symbol() string { - if t.Import != nil { - return t.Import.Package() + "." + t.Type - } - return t.Type -} - -// Scalars is a list of scalars. -type Scalars []Scalar - -// Imports returns all imports for the scalars. -func (ts Scalars) Imports() []*Import { - imports := []*Import{} - for _, t := range ts { - if v := t.Import; v != nil { - imports = append(imports, v) - } - } - - return imports -} diff --git a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go deleted file mode 100644 index 0bfbbecbdc..0000000000 --- a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go +++ /dev/null @@ -1,499 +0,0 @@ -// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. 
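The from_ptr.go deletions above all follow one generated pattern: dereference a possibly nil pointer, falling back to the type's zero value. A minimal sketch of the calling convention, assuming the mirrored import path used throughout this vendor tree; the values are illustrative:

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/smithy-go/ptr"
    )

    func main() {
        var missing *uint64 // nil, e.g. an unset optional API field
        d := 5 * time.Second

        fmt.Println(ptr.ToUint64(missing)) // 0: zero value instead of a nil-dereference panic
        fmt.Println(ptr.ToDuration(&d))    // 5s: plain dereference
    }
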
-package ptr - -import ( - "time" -) - -// Bool returns a pointer value for the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolSlice returns a slice of bool pointers from the values -// passed in. -func BoolSlice(vs []bool) []*bool { - ps := make([]*bool, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// BoolMap returns a map of bool pointers from the values -// passed in. -func BoolMap(vs map[string]bool) map[string]*bool { - ps := make(map[string]*bool, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Byte returns a pointer value for the byte value passed in. -func Byte(v byte) *byte { - return &v -} - -// ByteSlice returns a slice of byte pointers from the values -// passed in. -func ByteSlice(vs []byte) []*byte { - ps := make([]*byte, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// ByteMap returns a map of byte pointers from the values -// passed in. -func ByteMap(vs map[string]byte) map[string]*byte { - ps := make(map[string]*byte, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// String returns a pointer value for the string value passed in. -func String(v string) *string { - return &v -} - -// StringSlice returns a slice of string pointers from the values -// passed in. -func StringSlice(vs []string) []*string { - ps := make([]*string, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// StringMap returns a map of string pointers from the values -// passed in. -func StringMap(vs map[string]string) map[string]*string { - ps := make(map[string]*string, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int returns a pointer value for the int value passed in. -func Int(v int) *int { - return &v -} - -// IntSlice returns a slice of int pointers from the values -// passed in. -func IntSlice(vs []int) []*int { - ps := make([]*int, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// IntMap returns a map of int pointers from the values -// passed in. -func IntMap(vs map[string]int) map[string]*int { - ps := make(map[string]*int, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int8 returns a pointer value for the int8 value passed in. -func Int8(v int8) *int8 { - return &v -} - -// Int8Slice returns a slice of int8 pointers from the values -// passed in. -func Int8Slice(vs []int8) []*int8 { - ps := make([]*int8, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Int8Map returns a map of int8 pointers from the values -// passed in. -func Int8Map(vs map[string]int8) map[string]*int8 { - ps := make(map[string]*int8, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int16 returns a pointer value for the int16 value passed in. -func Int16(v int16) *int16 { - return &v -} - -// Int16Slice returns a slice of int16 pointers from the values -// passed in. -func Int16Slice(vs []int16) []*int16 { - ps := make([]*int16, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Int16Map returns a map of int16 pointers from the values -// passed in. -func Int16Map(vs map[string]int16) map[string]*int16 { - ps := make(map[string]*int16, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int32 returns a pointer value for the int32 value passed in. 
-func Int32(v int32) *int32 { - return &v -} - -// Int32Slice returns a slice of int32 pointers from the values -// passed in. -func Int32Slice(vs []int32) []*int32 { - ps := make([]*int32, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Int32Map returns a map of int32 pointers from the values -// passed in. -func Int32Map(vs map[string]int32) map[string]*int32 { - ps := make(map[string]*int32, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int64 returns a pointer value for the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Slice returns a slice of int64 pointers from the values -// passed in. -func Int64Slice(vs []int64) []*int64 { - ps := make([]*int64, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Int64Map returns a map of int64 pointers from the values -// passed in. -func Int64Map(vs map[string]int64) map[string]*int64 { - ps := make(map[string]*int64, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint returns a pointer value for the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintSlice returns a slice of uint pointers from the values -// passed in. -func UintSlice(vs []uint) []*uint { - ps := make([]*uint, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// UintMap returns a map of uint pointers from the values -// passed in. -func UintMap(vs map[string]uint) map[string]*uint { - ps := make(map[string]*uint, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint8 returns a pointer value for the uint8 value passed in. -func Uint8(v uint8) *uint8 { - return &v -} - -// Uint8Slice returns a slice of uint8 pointers from the values -// passed in. -func Uint8Slice(vs []uint8) []*uint8 { - ps := make([]*uint8, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Uint8Map returns a map of uint8 pointers from the values -// passed in. -func Uint8Map(vs map[string]uint8) map[string]*uint8 { - ps := make(map[string]*uint8, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint16 returns a pointer value for the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return &v -} - -// Uint16Slice returns a slice of uint16 pointers from the values -// passed in. -func Uint16Slice(vs []uint16) []*uint16 { - ps := make([]*uint16, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Uint16Map returns a map of uint16 pointers from the values -// passed in. -func Uint16Map(vs map[string]uint16) map[string]*uint16 { - ps := make(map[string]*uint16, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint32 returns a pointer value for the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Slice returns a slice of uint32 pointers from the values -// passed in. -func Uint32Slice(vs []uint32) []*uint32 { - ps := make([]*uint32, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Uint32Map returns a map of uint32 pointers from the values -// passed in. -func Uint32Map(vs map[string]uint32) map[string]*uint32 { - ps := make(map[string]*uint32, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint64 returns a pointer value for the uint64 value passed in. 
-func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Slice returns a slice of uint64 pointers from the values -// passed in. -func Uint64Slice(vs []uint64) []*uint64 { - ps := make([]*uint64, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Uint64Map returns a map of uint64 pointers from the values -// passed in. -func Uint64Map(vs map[string]uint64) map[string]*uint64 { - ps := make(map[string]*uint64, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Float32 returns a pointer value for the float32 value passed in. -func Float32(v float32) *float32 { - return &v -} - -// Float32Slice returns a slice of float32 pointers from the values -// passed in. -func Float32Slice(vs []float32) []*float32 { - ps := make([]*float32, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Float32Map returns a map of float32 pointers from the values -// passed in. -func Float32Map(vs map[string]float32) map[string]*float32 { - ps := make(map[string]*float32, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Float64 returns a pointer value for the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Slice returns a slice of float64 pointers from the values -// passed in. -func Float64Slice(vs []float64) []*float64 { - ps := make([]*float64, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Float64Map returns a map of float64 pointers from the values -// passed in. -func Float64Map(vs map[string]float64) map[string]*float64 { - ps := make(map[string]*float64, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Time returns a pointer value for the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeSlice returns a slice of time.Time pointers from the values -// passed in. -func TimeSlice(vs []time.Time) []*time.Time { - ps := make([]*time.Time, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// TimeMap returns a map of time.Time pointers from the values -// passed in. -func TimeMap(vs map[string]time.Time) map[string]*time.Time { - ps := make(map[string]*time.Time, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Duration returns a pointer value for the time.Duration value passed in. -func Duration(v time.Duration) *time.Duration { - return &v -} - -// DurationSlice returns a slice of time.Duration pointers from the values -// passed in. -func DurationSlice(vs []time.Duration) []*time.Duration { - ps := make([]*time.Duration, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// DurationMap returns a map of time.Duration pointers from the values -// passed in. -func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { - ps := make(map[string]*time.Duration, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} diff --git a/vendor/github.com/aws/smithy-go/rand/doc.go b/vendor/github.com/aws/smithy-go/rand/doc.go deleted file mode 100644 index f8b25d5625..0000000000 --- a/vendor/github.com/aws/smithy-go/rand/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package rand provides utilities for creating and working with random value -// generators. 
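to_ptr.go is the inverse half of the generated pair: it lifts scalars, and slices and maps of scalars, into pointers so optional fields can be populated from literals. A small usage sketch, same import-path caveat as above:

    package main

    import (
        "fmt"

        "github.com/aws/smithy-go/ptr"
    )

    func main() {
        n := ptr.Int64(42)                                    // *int64 from a literal
        tags := ptr.StringMap(map[string]string{"env": "ci"}) // map[string]*string

        fmt.Println(*n, *tags["env"]) // 42 ci
    }
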
-package rand diff --git a/vendor/github.com/aws/smithy-go/rand/rand.go b/vendor/github.com/aws/smithy-go/rand/rand.go deleted file mode 100644 index 9c479f62b5..0000000000 --- a/vendor/github.com/aws/smithy-go/rand/rand.go +++ /dev/null @@ -1,31 +0,0 @@ -package rand - -import ( - "crypto/rand" - "fmt" - "io" - "math/big" -) - -func init() { - Reader = rand.Reader -} - -// Reader provides a random reader that can reset during testing. -var Reader io.Reader - -// Int63n returns a int64 between zero and value of max, read from an io.Reader source. -func Int63n(reader io.Reader, max int64) (int64, error) { - bi, err := rand.Int(reader, big.NewInt(max)) - if err != nil { - return 0, fmt.Errorf("failed to read random value, %w", err) - } - - return bi.Int64(), nil -} - -// CryptoRandInt63n returns a random int64 between zero and value of max -// obtained from the crypto rand source. -func CryptoRandInt63n(max int64) (int64, error) { - return Int63n(Reader, max) -} diff --git a/vendor/github.com/aws/smithy-go/rand/uuid.go b/vendor/github.com/aws/smithy-go/rand/uuid.go deleted file mode 100644 index dc81cbc68a..0000000000 --- a/vendor/github.com/aws/smithy-go/rand/uuid.go +++ /dev/null @@ -1,87 +0,0 @@ -package rand - -import ( - "encoding/hex" - "io" -) - -const dash byte = '-' - -// UUIDIdempotencyToken provides a utility to get idempotency tokens in the -// UUID format. -type UUIDIdempotencyToken struct { - uuid *UUID -} - -// NewUUIDIdempotencyToken returns a idempotency token provider returning -// tokens in the UUID random format using the reader provided. -func NewUUIDIdempotencyToken(r io.Reader) *UUIDIdempotencyToken { - return &UUIDIdempotencyToken{uuid: NewUUID(r)} -} - -// GetIdempotencyToken returns a random UUID value for Idempotency token. -func (u UUIDIdempotencyToken) GetIdempotencyToken() (string, error) { - return u.uuid.GetUUID() -} - -// UUID provides computing random UUID version 4 values from a random source -// reader. -type UUID struct { - randSrc io.Reader -} - -// NewUUID returns an initialized UUID value that can be used to retrieve -// random UUID version 4 values. -func NewUUID(r io.Reader) *UUID { - return &UUID{randSrc: r} -} - -// GetUUID returns a random UUID version 4 string representation sourced from the random reader the -// UUID was created with. Returns an error if unable to compute the UUID. -func (r *UUID) GetUUID() (string, error) { - var b [16]byte - if _, err := io.ReadFull(r.randSrc, b[:]); err != nil { - return "", err - } - r.makeUUIDv4(b[:]) - return format(b), nil -} - -// GetBytes returns a byte slice containing a random UUID version 4 sourced from the random reader the -// UUID was created with. Returns an error if unable to compute the UUID. -func (r *UUID) GetBytes() (u []byte, err error) { - u = make([]byte, 16) - if _, err = io.ReadFull(r.randSrc, u); err != nil { - return u, err - } - r.makeUUIDv4(u) - return u, nil -} - -func (r *UUID) makeUUIDv4(u []byte) { - // 13th character is "4" - u[6] = (u[6] & 0x0f) | 0x40 // Version 4 - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] & 0x3f) | 0x80 // Variant most significant bits are 10x where x can be either 1 or 0 -} - -// Format returns the canonical text representation of a UUID. -// This implementation is optimized to not use fmt. 
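The rand package removed here is small but load-bearing: a swappable package-level Reader (crypto/rand by default), bounded random integers, and UUID version 4 idempotency tokens. A sketch of the two public entry points:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/smithy-go/rand"
    )

    func main() {
        // Version 4 UUID token, e.g. for operation idempotency fields.
        tok := rand.NewUUIDIdempotencyToken(rand.Reader)
        id, err := tok.GetIdempotencyToken()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(id)

        // Uniform int64 in [0, 100) from the same crypto source.
        n, err := rand.CryptoRandInt63n(100)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(n)
    }
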
-// Example: 82e42f16-b6cc-4d5b-95f5-d403c4befd3d -func format(u [16]byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - - var scratch [36]byte - - hex.Encode(scratch[:8], u[0:4]) - scratch[8] = dash - hex.Encode(scratch[9:13], u[4:6]) - scratch[13] = dash - hex.Encode(scratch[14:18], u[6:8]) - scratch[18] = dash - hex.Encode(scratch[19:23], u[8:10]) - scratch[23] = dash - hex.Encode(scratch[24:], u[10:]) - - return string(scratch[:]) -} diff --git a/vendor/github.com/aws/smithy-go/time/time.go b/vendor/github.com/aws/smithy-go/time/time.go deleted file mode 100644 index b552a09f8a..0000000000 --- a/vendor/github.com/aws/smithy-go/time/time.go +++ /dev/null @@ -1,134 +0,0 @@ -package time - -import ( - "context" - "fmt" - "math/big" - "strings" - "time" -) - -const ( - // dateTimeFormat is a IMF-fixdate formatted RFC3339 section 5.6 - dateTimeFormatInput = "2006-01-02T15:04:05.999999999Z" - dateTimeFormatInputNoZ = "2006-01-02T15:04:05.999999999" - dateTimeFormatOutput = "2006-01-02T15:04:05.999Z" - - // httpDateFormat is a date time defined by RFC 7231#section-7.1.1.1 - // IMF-fixdate with no UTC offset. - httpDateFormat = "Mon, 02 Jan 2006 15:04:05 GMT" - // Additional formats needed for compatibility. - httpDateFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT" - httpDateFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT" -) - -var millisecondFloat = big.NewFloat(1e3) - -// FormatDateTime formats value as a date-time, (RFC3339 section 5.6) -// -// Example: 1985-04-12T23:20:50.52Z -func FormatDateTime(value time.Time) string { - return value.UTC().Format(dateTimeFormatOutput) -} - -// ParseDateTime parses a string as a date-time, (RFC3339 section 5.6) -// -// Example: 1985-04-12T23:20:50.52Z -func ParseDateTime(value string) (time.Time, error) { - return tryParse(value, - dateTimeFormatInput, - dateTimeFormatInputNoZ, - time.RFC3339Nano, - time.RFC3339, - ) -} - -// FormatHTTPDate formats value as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate) -// -// Example: Tue, 29 Apr 2014 18:30:38 GMT -func FormatHTTPDate(value time.Time) string { - return value.UTC().Format(httpDateFormat) -} - -// ParseHTTPDate parses a string as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate) -// -// Example: Tue, 29 Apr 2014 18:30:38 GMT -func ParseHTTPDate(value string) (time.Time, error) { - return tryParse(value, - httpDateFormat, - httpDateFormatSingleDigitDay, - httpDateFormatSingleDigitDayTwoDigitYear, - time.RFC850, - time.ANSIC, - ) -} - -// FormatEpochSeconds returns value as a Unix time in seconds with with decimal precision -// -// Example: 1515531081.123 -func FormatEpochSeconds(value time.Time) float64 { - ms := value.UnixNano() / int64(time.Millisecond) - return float64(ms) / 1e3 -} - -// ParseEpochSeconds returns value as a Unix time in seconds with with decimal precision -// -// Example: 1515531081.123 -func ParseEpochSeconds(value float64) time.Time { - f := big.NewFloat(value) - f = f.Mul(f, millisecondFloat) - i, _ := f.Int64() - // Offset to `UTC` because time.Unix returns the time value based on system - // local setting. 
- return time.Unix(0, i*1e6).UTC() -} - -func tryParse(v string, formats ...string) (time.Time, error) { - var errs parseErrors - for _, f := range formats { - t, err := time.Parse(f, v) - if err != nil { - errs = append(errs, parseError{ - Format: f, - Err: err, - }) - continue - } - return t, nil - } - - return time.Time{}, fmt.Errorf("unable to parse time string, %w", errs) -} - -type parseErrors []parseError - -func (es parseErrors) Error() string { - var s strings.Builder - for _, e := range es { - fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err) - } - - return "parse errors:" + s.String() -} - -type parseError struct { - Format string - Err error -} - -// SleepWithContext will wait for the timer duration to expire, or until the context -// is canceled. Whichever happens first. If the context is canceled the -// Context's error will be returned. -func SleepWithContext(ctx context.Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth.go b/vendor/github.com/aws/smithy-go/transport/http/auth.go deleted file mode 100644 index 58e1ab5ef8..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/auth.go +++ /dev/null @@ -1,21 +0,0 @@ -package http - -import ( - "context" - - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" -) - -// AuthScheme defines an HTTP authentication scheme. -type AuthScheme interface { - SchemeID() string - IdentityResolver(auth.IdentityResolverOptions) auth.IdentityResolver - Signer() Signer -} - -// Signer defines the interface through which HTTP requests are supplemented -// with an Identity. -type Signer interface { - SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go b/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go deleted file mode 100644 index d60cf2a60f..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go +++ /dev/null @@ -1,45 +0,0 @@ -package http - -import ( - "context" - - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" -) - -// NewAnonymousScheme returns the anonymous HTTP auth scheme. 
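The deleted time package above is smithy-go's protocol-level clock code: RFC 3339 date-time, RFC 7231 http-date, and fractional epoch seconds, each with lenient parsing via tryParse. A round-trip sketch (smithytime is a local alias to avoid shadowing the stdlib time package):

    package main

    import (
        "fmt"
        "log"

        smithytime "github.com/aws/smithy-go/time"
    )

    func main() {
        t, err := smithytime.ParseDateTime("1985-04-12T23:20:50.52Z")
        if err != nil {
            log.Fatal(err)
        }

        fmt.Println(smithytime.FormatHTTPDate(t))     // Fri, 12 Apr 1985 23:20:50 GMT
        fmt.Println(smithytime.FormatEpochSeconds(t)) // 4.8219605052e+08 (float64 in %v notation)
    }
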
-func NewAnonymousScheme() AuthScheme { - return &authScheme{ - schemeID: auth.SchemeIDAnonymous, - signer: &nopSigner{}, - } -} - -// authScheme is parameterized to generically implement the exported AuthScheme -// interface -type authScheme struct { - schemeID string - signer Signer -} - -var _ AuthScheme = (*authScheme)(nil) - -func (s *authScheme) SchemeID() string { - return s.schemeID -} - -func (s *authScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver { - return o.GetIdentityResolver(s.schemeID) -} - -func (s *authScheme) Signer() Signer { - return s.signer -} - -type nopSigner struct{} - -var _ Signer = (*nopSigner)(nil) - -func (*nopSigner) SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error { - return nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go deleted file mode 100644 index bc4ad6e797..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go +++ /dev/null @@ -1,70 +0,0 @@ -package http - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" -) - -const contentMD5Header = "Content-Md5" - -// contentMD5Checksum provides a middleware to compute and set -// content-md5 checksum for a http request -type contentMD5Checksum struct { -} - -// AddContentChecksumMiddleware adds checksum middleware to middleware's -// build step. -func AddContentChecksumMiddleware(stack *middleware.Stack) error { - // This middleware must be executed before request body is set. - return stack.Build.Add(&contentMD5Checksum{}, middleware.Before) -} - -// ID returns the identifier for the checksum middleware -func (m *contentMD5Checksum) ID() string { return "ContentChecksum" } - -// HandleBuild adds behavior to compute md5 checksum and add content-md5 header -// on http request -func (m *contentMD5Checksum) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // if Content-MD5 header is already present, return - if v := req.Header.Get(contentMD5Header); len(v) != 0 { - return next.HandleBuild(ctx, in) - } - - // fetch the request stream. 
- stream := req.GetStream() - // compute checksum if payload is explicit - if stream != nil { - if !req.IsStreamSeekable() { - return out, metadata, fmt.Errorf( - "unseekable stream is not supported for computing md5 checksum") - } - - v, err := computeMD5Checksum(stream) - if err != nil { - return out, metadata, fmt.Errorf("error computing md5 checksum, %w", err) - } - - // reset the request stream - if err := req.RewindStream(); err != nil { - return out, metadata, fmt.Errorf( - "error rewinding request stream after computing md5 checksum, %w", err) - } - - // set the 'Content-MD5' header - req.Header.Set(contentMD5Header, string(v)) - } - - // set md5 header value - return next.HandleBuild(ctx, in) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go deleted file mode 100644 index e691c69bf4..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/client.go +++ /dev/null @@ -1,120 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -// ClientDo provides the interface for custom HTTP client implementations. -type ClientDo interface { - Do(*http.Request) (*http.Response, error) -} - -// ClientDoFunc provides a helper to wrap a function as an HTTP client for -// round tripping requests. -type ClientDoFunc func(*http.Request) (*http.Response, error) - -// Do will invoke the underlying func, returning the result. -func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) { - return fn(r) -} - -// ClientHandler wraps a client that implements the HTTP Do method. Standard -// implementation is http.Client. -type ClientHandler struct { - client ClientDo -} - -// NewClientHandler returns an initialized middleware handler for the client. -func NewClientHandler(client ClientDo) ClientHandler { - return ClientHandler{ - client: client, - } -} - -// Handle implements the middleware Handler interface, that will invoke the -// underlying HTTP client. Requires the input to be a Smithy *Request. Returns -// a smithy *Response, or error if the request failed. -func (c ClientHandler) Handle(ctx context.Context, input interface{}) ( - out interface{}, metadata middleware.Metadata, err error, -) { - req, ok := input.(*Request) - if !ok { - return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input) - } - - builtRequest := req.Build(ctx) - if err := ValidateEndpointHost(builtRequest.Host); err != nil { - return nil, metadata, err - } - - resp, err := c.client.Do(builtRequest) - if resp == nil { - // Ensure a http response value is always present to prevent unexpected - // panics. - resp = &http.Response{ - Header: http.Header{}, - Body: http.NoBody, - } - } - if err != nil { - err = &RequestSendError{Err: err} - - // Override the error with a context canceled error, if that was canceled. - select { - case <-ctx.Done(): - err = &smithy.CanceledError{Err: ctx.Err()} - default: - } - } - - // HTTP RoundTripper *should* close the request body. But this may not happen in a timely manner. - // So instead Smithy *Request Build wraps the body to be sent in a safe closer that will clear the - // stream reference so that it can be safely reused. - if builtRequest.Body != nil { - _ = builtRequest.Body.Close() - } - - return &Response{Response: resp}, metadata, err -} - -// RequestSendError provides a generic request transport error. 
This error -// should wrap errors making HTTP client requests. -// -// The ClientHandler will wrap the HTTP client's error if the client request -// fails, and did not fail because of context canceled. -type RequestSendError struct { - Err error -} - -// ConnectionError returns that the error is related to not being able to send -// the request, or receive a response from the service. -func (e *RequestSendError) ConnectionError() bool { - return true -} - -// Unwrap returns the underlying error, if there was one. -func (e *RequestSendError) Unwrap() error { - return e.Err -} - -func (e *RequestSendError) Error() string { - return fmt.Sprintf("request send failed, %v", e.Err) -} - -// NopClient provides a client that ignores the request, and returns an empty -// successful HTTP response value. -type NopClient struct{} - -// Do ignores the request and returns a 200 status empty response. -func (NopClient) Do(r *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: 200, - Header: http.Header{}, - Body: http.NoBody, - }, nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/doc.go b/vendor/github.com/aws/smithy-go/transport/http/doc.go deleted file mode 100644 index 07366ac85a..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -/* -Package http provides the HTTP transport client and request/response types -needed to round trip API operation calls with an service. -*/ -package http diff --git a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go deleted file mode 100644 index cbc9deb4df..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go +++ /dev/null @@ -1,163 +0,0 @@ -package http - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -func splitHeaderListValues(vs []string, splitFn func(string) ([]string, error)) ([]string, error) { - values := make([]string, 0, len(vs)) - - for i := 0; i < len(vs); i++ { - parts, err := splitFn(vs[i]) - if err != nil { - return nil, err - } - values = append(values, parts...) - } - - return values, nil -} - -// SplitHeaderListValues attempts to split the elements of the slice by commas, -// and return a list of all values separated. Returns error if unable to -// separate the values. -func SplitHeaderListValues(vs []string) ([]string, error) { - return splitHeaderListValues(vs, quotedCommaSplit) -} - -func quotedCommaSplit(v string) (parts []string, err error) { - v = strings.TrimSpace(v) - - expectMore := true - for i := 0; i < len(v); i++ { - if unicode.IsSpace(rune(v[i])) { - continue - } - expectMore = false - - // leading space in part is ignored. - // Start of value must be non-space, or quote. - // - // - If quote, enter quoted mode, find next non-escaped quote to - // terminate the value. - // - Otherwise, find next comma to terminate value. 
- - remaining := v[i:] - - var value string - var valueLen int - if remaining[0] == '"' { - //------------------------------ - // Quoted value - //------------------------------ - var j int - var skipQuote bool - for j += 1; j < len(remaining); j++ { - if remaining[j] == '\\' || (remaining[j] != '\\' && skipQuote) { - skipQuote = !skipQuote - continue - } - if remaining[j] == '"' { - break - } - } - if j == len(remaining) || j == 1 { - return nil, fmt.Errorf("value %v missing closing double quote", - remaining) - } - valueLen = j + 1 - - tail := remaining[valueLen:] - var k int - for ; k < len(tail); k++ { - if !unicode.IsSpace(rune(tail[k])) && tail[k] != ',' { - return nil, fmt.Errorf("value %v has non-space trailing characters", - remaining) - } - if tail[k] == ',' { - expectMore = true - break - } - } - value = remaining[:valueLen] - value, err = strconv.Unquote(value) - if err != nil { - return nil, fmt.Errorf("failed to unquote value %v, %w", value, err) - } - - // Pad valueLen to include trailing space(s) so `i` is updated correctly. - valueLen += k - - } else { - //------------------------------ - // Unquoted value - //------------------------------ - - // Index of the next comma is the length of the value, or end of string. - valueLen = strings.Index(remaining, ",") - if valueLen != -1 { - expectMore = true - } else { - valueLen = len(remaining) - } - value = strings.TrimSpace(remaining[:valueLen]) - } - - i += valueLen - parts = append(parts, value) - - } - - if expectMore { - parts = append(parts, "") - } - - return parts, nil -} - -// SplitHTTPDateTimestampHeaderListValues attempts to split the HTTP-Date -// timestamp values in the slice by commas, and return a list of all values -// separated. The split is aware of the HTTP-Date timestamp format, and will skip -// comma within the timestamp value. Returns an error if unable to split the -// timestamp values. -func SplitHTTPDateTimestampHeaderListValues(vs []string) ([]string, error) { - return splitHeaderListValues(vs, splitHTTPDateHeaderValue) -} - -func splitHTTPDateHeaderValue(v string) ([]string, error) { - if n := strings.Count(v, ","); n <= 1 { - // Nothing to do if only contains a no, or single HTTPDate value - return []string{v}, nil - } else if n%2 == 0 { - return nil, fmt.Errorf("invalid timestamp HTTPDate header comma separations, %q", v) - } - - var parts []string - var i, j int - - var doSplit bool - for ; i < len(v); i++ { - if v[i] == ',' { - if doSplit { - doSplit = false - parts = append(parts, strings.TrimSpace(v[j:i])) - j = i + 1 - } else { - // Skip the first comma in the timestamp value since that - // separates the day from the rest of the timestamp. - // - // Tue, 17 Dec 2019 23:48:18 GMT - doSplit = true - } - } - } - // Add final part - if j < len(v) { - parts = append(parts, strings.TrimSpace(v[j:])) - } - - return parts, nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go deleted file mode 100644 index 6b290fec03..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/host.go +++ /dev/null @@ -1,89 +0,0 @@ -package http - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -// ValidateEndpointHost validates that the host string passed in is a valid RFC -// 3986 host. Returns error if the host is not valid. 
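headerlist.go, deleted just above, handles the awkward corners of HTTP list-valued fields: quoted elements that contain commas, and http-date timestamps whose day names also contain commas. A sketch of both splitters:

    package main

    import (
        "fmt"
        "log"

        smithyhttp "github.com/aws/smithy-go/transport/http"
    )

    func main() {
        vals, err := smithyhttp.SplitHeaderListValues([]string{`a, "b, c", d`})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%q\n", vals) // ["a" "b, c" "d"]: the quoted element survives intact

        dates, err := smithyhttp.SplitHTTPDateTimestampHeaderListValues(
            []string{"Tue, 17 Dec 2019 23:48:18 GMT, Wed, 18 Dec 2019 00:00:00 GMT"})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%q\n", dates) // two complete timestamps
    }
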
-func ValidateEndpointHost(host string) error { - var errors strings.Builder - var hostname string - var port string - var err error - - if strings.Contains(host, ":") { - hostname, port, err = net.SplitHostPort(host) - if err != nil { - errors.WriteString(fmt.Sprintf("\n endpoint %v, failed to parse, got ", host)) - errors.WriteString(err.Error()) - } - - if !ValidPortNumber(port) { - errors.WriteString(fmt.Sprintf("port number should be in range [0-65535], got %v", port)) - } - } else { - hostname = host - } - - labels := strings.Split(hostname, ".") - for i, label := range labels { - if i == len(labels)-1 && len(label) == 0 { - // Allow trailing dot for FQDN hosts. - continue - } - - if !ValidHostLabel(label) { - errors.WriteString("\nendpoint host domain labels must match \"[a-zA-Z0-9-]{1,63}\", but found: ") - errors.WriteString(label) - } - } - - if len(hostname) == 0 && len(port) != 0 { - errors.WriteString("\nendpoint host with port must not be empty") - } - - if len(hostname) > 255 { - errors.WriteString(fmt.Sprintf("\nendpoint host must be less than 255 characters, but was %d", len(hostname))) - } - - if len(errors.String()) > 0 { - return fmt.Errorf("invalid endpoint host%s", errors.String()) - } - return nil -} - -// ValidPortNumber returns whether the port is valid RFC 3986 port. -func ValidPortNumber(port string) bool { - i, err := strconv.Atoi(port) - if err != nil { - return false - } - - if i < 0 || i > 65535 { - return false - } - return true -} - -// ValidHostLabel returns whether the label is a valid RFC 3986 host abel. -func ValidHostLabel(label string) bool { - if l := len(label); l == 0 || l > 63 { - return false - } - for _, r := range label { - switch { - case r >= '0' && r <= '9': - case r >= 'A' && r <= 'Z': - case r >= 'a' && r <= 'z': - case r == '-': - default: - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go deleted file mode 100644 index 941a8d6b51..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go +++ /dev/null @@ -1,75 +0,0 @@ -package io - -import ( - "io" - "sync" -) - -// NewSafeReadCloser returns a new safeReadCloser that wraps readCloser. -func NewSafeReadCloser(readCloser io.ReadCloser) io.ReadCloser { - sr := &safeReadCloser{ - readCloser: readCloser, - } - - if _, ok := readCloser.(io.WriterTo); ok { - return &safeWriteToReadCloser{safeReadCloser: sr} - } - - return sr -} - -// safeWriteToReadCloser wraps a safeReadCloser but exposes a WriteTo interface implementation. This will panic -// if the underlying io.ReadClose does not support WriteTo. Use NewSafeReadCloser to ensure the proper handling of this -// type. -type safeWriteToReadCloser struct { - *safeReadCloser -} - -// WriteTo implements the io.WriteTo interface. -func (r *safeWriteToReadCloser) WriteTo(w io.Writer) (int64, error) { - r.safeReadCloser.mtx.Lock() - defer r.safeReadCloser.mtx.Unlock() - - if r.safeReadCloser.closed { - return 0, io.EOF - } - - return r.safeReadCloser.readCloser.(io.WriterTo).WriteTo(w) -} - -// safeReadCloser wraps a io.ReadCloser and presents an io.ReadCloser interface. When Close is called on safeReadCloser -// the underlying Close method will be executed, and then the reference to the reader will be dropped. 
This type -// is meant to be used with the net/http library which will retain a reference to the request body for the lifetime -// of a goroutine connection. Wrapping in this manner will ensure that no data race conditions are falsely reported. -// This type is thread-safe. -type safeReadCloser struct { - readCloser io.ReadCloser - closed bool - mtx sync.Mutex -} - -// Read reads up to len(p) bytes into p from the underlying read. If the reader is closed io.EOF will be returned. -func (r *safeReadCloser) Read(p []byte) (n int, err error) { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.closed { - return 0, io.EOF - } - - return r.readCloser.Read(p) -} - -// Close calls the underlying io.ReadCloser's Close method, removes the reference to the reader, and returns any error -// reported from Close. Subsequent calls to Close will always return a nil error. -func (r *safeReadCloser) Close() error { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.closed { - return nil - } - - r.closed = true - rc := r.readCloser - r.readCloser = nil - return rc.Close() -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go deleted file mode 100644 index 5d6a4b23a2..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go +++ /dev/null @@ -1,25 +0,0 @@ -package http - -import ( - "crypto/md5" - "encoding/base64" - "fmt" - "io" -) - -// computeMD5Checksum computes base64 md5 checksum of an io.Reader's contents. -// Returns the byte slice of md5 checksum and an error. -func computeMD5Checksum(r io.Reader) ([]byte, error) { - h := md5.New() - // copy errors may be assumed to be from the body. - _, err := io.Copy(h, r) - if err != nil { - return nil, fmt.Errorf("failed to read body: %w", err) - } - - // encode the md5 checksum in base64. - sum := h.Sum(nil) - sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) - base64.StdEncoding.Encode(sum64, sum) - return sum64, nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go deleted file mode 100644 index 1d3b218a12..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go +++ /dev/null @@ -1,79 +0,0 @@ -package http - -import ( - "context" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - "io" - "io/ioutil" -) - -// AddErrorCloseResponseBodyMiddleware adds the middleware to automatically -// close the response body of an operation request if the request response -// failed. 
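For the host.go hunk a little above: the validators are plain predicates over RFC 3986 host syntax and are usable outside the middleware stack. A quick sketch:

    package main

    import (
        "fmt"

        smithyhttp "github.com/aws/smithy-go/transport/http"
    )

    func main() {
        fmt.Println(smithyhttp.ValidateEndpointHost("api.example.com:443")) // <nil>
        fmt.Println(smithyhttp.ValidHostLabel("api-v2"))                    // true
        fmt.Println(smithyhttp.ValidPortNumber("99999"))                    // false: out of [0, 65535]
    }
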
-func AddErrorCloseResponseBodyMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&errorCloseResponseBodyMiddleware{}, "OperationDeserializer", middleware.Before) -} - -type errorCloseResponseBodyMiddleware struct{} - -func (*errorCloseResponseBodyMiddleware) ID() string { - return "ErrorCloseResponseBody" -} - -func (m *errorCloseResponseBodyMiddleware) HandleDeserialize( - ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - output middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err := next.HandleDeserialize(ctx, input) - if err != nil { - if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil { - // Consume the full body to prevent TCP connection resets on some platforms - _, _ = io.Copy(ioutil.Discard, resp.Body) - // Do not validate that the response closes successfully. - resp.Body.Close() - } - } - - return out, metadata, err -} - -// AddCloseResponseBodyMiddleware adds the middleware to automatically close -// the response body of an operation request, after the response had been -// deserialized. -func AddCloseResponseBodyMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&closeResponseBody{}, "OperationDeserializer", middleware.Before) -} - -type closeResponseBody struct{} - -func (*closeResponseBody) ID() string { - return "CloseResponseBody" -} - -func (m *closeResponseBody) HandleDeserialize( - ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - output middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err := next.HandleDeserialize(ctx, input) - if err != nil { - return out, metadata, err - } - - if resp, ok := out.RawResponse.(*Response); ok { - // Consume the full body to prevent TCP connection resets on some platforms - _, copyErr := io.Copy(ioutil.Discard, resp.Body) - if copyErr != nil { - middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse") - } - - closeErr := resp.Body.Close() - if closeErr != nil { - middleware.GetLogger(ctx).Logf(logging.Warn, "failed to close HTTP response body, this may affect connection reuse") - } - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go deleted file mode 100644 index 9969389bb2..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go +++ /dev/null @@ -1,84 +0,0 @@ -package http - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" -) - -// ComputeContentLength provides a middleware to set the content-length -// header for the length of a serialize request body. -type ComputeContentLength struct { -} - -// AddComputeContentLengthMiddleware adds ComputeContentLength to the middleware -// stack's Build step. -func AddComputeContentLengthMiddleware(stack *middleware.Stack) error { - return stack.Build.Add(&ComputeContentLength{}, middleware.After) -} - -// ID returns the identifier for the ComputeContentLength. -func (m *ComputeContentLength) ID() string { return "ComputeContentLength" } - -// HandleBuild adds the length of the serialized request to the HTTP header -// if the length can be determined. 
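Both body-closing helpers insert relative to the "OperationDeserializer" ID, so they only succeed against a stack that already has one; generated smithy clients register it. A library-shaped sketch of the mutator such a client's options would receive (the function name closeBodies is ours):

    package main

    import (
        "github.com/aws/smithy-go/middleware"
        smithyhttp "github.com/aws/smithy-go/transport/http"
    )

    // closeBodies drains and closes response bodies on both the error and
    // success paths, preserving connection reuse.
    func closeBodies(stack *middleware.Stack) error {
        if err := smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
            return err
        }
        return smithyhttp.AddCloseResponseBodyMiddleware(stack)
    }

    func main() {} // wiring omitted; closeBodies is passed into a client's stack options
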
-func (m *ComputeContentLength) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // do nothing if request content-length was set to 0 or above. - if req.ContentLength >= 0 { - return next.HandleBuild(ctx, in) - } - - // attempt to compute stream length - if n, ok, err := req.StreamLength(); err != nil { - return out, metadata, fmt.Errorf( - "failed getting length of request stream, %w", err) - } else if ok { - req.ContentLength = n - } - - return next.HandleBuild(ctx, in) -} - -// validateContentLength provides a middleware to validate the content-length -// is valid (greater than zero), for the serialized request payload. -type validateContentLength struct{} - -// ValidateContentLengthHeader adds middleware that validates request content-length -// is set to value greater than zero. -func ValidateContentLengthHeader(stack *middleware.Stack) error { - return stack.Build.Add(&validateContentLength{}, middleware.After) -} - -// ID returns the identifier for the ComputeContentLength. -func (m *validateContentLength) ID() string { return "ValidateContentLength" } - -// HandleBuild adds the length of the serialized request to the HTTP header -// if the length can be determined. -func (m *validateContentLength) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // if request content-length was set to less than 0, return an error - if req.ContentLength < 0 { - return out, metadata, fmt.Errorf( - "content length for payload is required and must be at least 0") - } - - return next.HandleBuild(ctx, in) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go deleted file mode 100644 index 855c227203..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go +++ /dev/null @@ -1,81 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - - "github.com/aws/smithy-go/middleware" -) - -// WithHeaderComment instruments a middleware stack to append an HTTP field -// comment to the given header as specified in RFC 9110 -// (https://www.rfc-editor.org/rfc/rfc9110#name-comments). -// -// The header is case-insensitive. If the provided header exists when the -// middleware runs, the content will be inserted as-is enclosed in parentheses. -// -// Note that per the HTTP specification, comments are only allowed in fields -// containing "comment" as part of their field value definition, but this API -// will NOT verify whether the provided header is one of them. -// -// WithHeaderComment MAY be applied more than once to a middleware stack and/or -// more than once per header. 
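The two content-length middlewares are Build-step Adds, so they work even on a bare stack. A sketch wiring both; the stack ID "example" is arbitrary, and NewStackRequest comes from elsewhere in this same transport/http package (not part of the deleted hunks):

    package main

    import (
        "log"

        "github.com/aws/smithy-go/middleware"
        smithyhttp "github.com/aws/smithy-go/transport/http"
    )

    func main() {
        stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

        // Fill Content-Length from the stream length when it is unset (< 0)...
        if err := smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
            log.Fatal(err)
        }
        // ...then refuse to send a request whose length is still unknown.
        if err := smithyhttp.ValidateContentLengthHeader(stack); err != nil {
            log.Fatal(err)
        }
    }
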
-func WithHeaderComment(header, content string) func(*middleware.Stack) error { - return func(s *middleware.Stack) error { - m, err := getOrAddHeaderComment(s) - if err != nil { - return fmt.Errorf("get or add header comment: %v", err) - } - - m.values.Add(header, content) - return nil - } -} - -type headerCommentMiddleware struct { - values http.Header // hijack case-insensitive access APIs -} - -func (*headerCommentMiddleware) ID() string { - return "headerComment" -} - -func (m *headerCommentMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - r, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - for h, contents := range m.values { - for _, c := range contents { - if existing := r.Header.Get(h); existing != "" { - r.Header.Set(h, fmt.Sprintf("%s (%s)", existing, c)) - } - } - } - - return next.HandleBuild(ctx, in) -} - -func getOrAddHeaderComment(s *middleware.Stack) (*headerCommentMiddleware, error) { - id := (*headerCommentMiddleware)(nil).ID() - m, ok := s.Build.Get(id) - if !ok { - m := &headerCommentMiddleware{values: http.Header{}} - if err := s.Build.Add(m, middleware.After); err != nil { - return nil, fmt.Errorf("add build: %v", err) - } - - return m, nil - } - - hc, ok := m.(*headerCommentMiddleware) - if !ok { - return nil, fmt.Errorf("existing middleware w/ id %s is not *headerCommentMiddleware", id) - } - - return hc, nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go deleted file mode 100644 index eac32b4bab..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go +++ /dev/null @@ -1,167 +0,0 @@ -package http - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" -) - -type isContentTypeAutoSet struct{} - -// SetIsContentTypeDefaultValue returns a Context specifying if the request's -// content-type header was set to a default value. -func SetIsContentTypeDefaultValue(ctx context.Context, isDefault bool) context.Context { - return context.WithValue(ctx, isContentTypeAutoSet{}, isDefault) -} - -// GetIsContentTypeDefaultValue returns if the content-type HTTP header on the -// request is a default value that was auto assigned by an operation -// serializer. Allows middleware post serialization to know if the content-type -// was auto set to a default value or not. -// -// Also returns false if the Context value was never updated to include if -// content-type was set to a default value. -func GetIsContentTypeDefaultValue(ctx context.Context) bool { - v, _ := ctx.Value(isContentTypeAutoSet{}).(bool) - return v -} - -// AddNoPayloadDefaultContentTypeRemover Adds the DefaultContentTypeRemover -// middleware to the stack after the operation serializer. This middleware will -// remove the content-type header from the request if it was set as a default -// value, and no request payload is present. -// -// Returns error if unable to add the middleware. 
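WithHeaderComment is itself a stack-mutator factory, so applying it is a direct function call against the stack. A sketch appending an RFC 9110 comment to User-Agent (the header name and comment text are illustrative); the comment is only attached if the header is present when the Build step runs:

    package main

    import (
        "log"

        "github.com/aws/smithy-go/middleware"
        smithyhttp "github.com/aws/smithy-go/transport/http"
    )

    func main() {
        stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

        // Renders as: <existing User-Agent value> (integ-test)
        if err := smithyhttp.WithHeaderComment("User-Agent", "integ-test")(stack); err != nil {
            log.Fatal(err)
        }
    }
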
-func AddNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) { - err = stack.Serialize.Insert(removeDefaultContentType{}, - "OperationSerializer", middleware.After) - if err != nil { - return fmt.Errorf("failed to add %s serialize middleware, %w", - removeDefaultContentType{}.ID(), err) - } - - return nil -} - -// RemoveNoPayloadDefaultContentTypeRemover removes the -// DefaultContentTypeRemover middleware from the stack. Returns an error if -// unable to remove the middleware. -func RemoveNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) { - _, err = stack.Serialize.Remove(removeDefaultContentType{}.ID()) - if err != nil { - return fmt.Errorf("failed to remove %s serialize middleware, %w", - removeDefaultContentType{}.ID(), err) - - } - return nil -} - -// removeDefaultContentType provides after serialization middleware that will -// remove the content-type header from an HTTP request if the header was set as -// a default value by the operation serializer, and there is no request payload. -type removeDefaultContentType struct{} - -// ID returns the middleware ID -func (removeDefaultContentType) ID() string { return "RemoveDefaultContentType" } - -// HandleSerialize implements the serialization middleware. -func (removeDefaultContentType) HandleSerialize( - ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, meta middleware.Metadata, err error, -) { - req, ok := input.Request.(*Request) - if !ok { - return out, meta, fmt.Errorf( - "unexpected request type %T for removeDefaultContentType middleware", - input.Request) - } - - if GetIsContentTypeDefaultValue(ctx) && req.GetStream() == nil { - req.Header.Del("Content-Type") - input.Request = req - } - - return next.HandleSerialize(ctx, input) -} - -type headerValue struct { - header string - value string - append bool -} - -type headerValueHelper struct { - headerValues []headerValue -} - -func (h *headerValueHelper) addHeaderValue(value headerValue) { - h.headerValues = append(h.headerValues, value) -} - -func (h *headerValueHelper) ID() string { - return "HTTPHeaderHelper" -} - -func (h *headerValueHelper) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (out middleware.BuildOutput, metadata middleware.Metadata, err error) { - req, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - for _, value := range h.headerValues { - if value.append { - req.Header.Add(value.header, value.value) - } else { - req.Header.Set(value.header, value.value) - } - } - - return next.HandleBuild(ctx, in) -} - -func getOrAddHeaderValueHelper(stack *middleware.Stack) (*headerValueHelper, error) { - id := (*headerValueHelper)(nil).ID() - m, ok := stack.Build.Get(id) - if !ok { - m = &headerValueHelper{} - err := stack.Build.Add(m, middleware.After) - if err != nil { - return nil, err - } - } - - requestUserAgent, ok := m.(*headerValueHelper) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", m, id) - } - - return requestUserAgent, nil -} - -// AddHeaderValue returns a stack mutator that adds the header value pair to header. -// Appends to any existing values if present. 
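The removal decision is coordinated through a context flag that operation serializers set; GetIsContentTypeDefaultValue reports false if nothing ever set it. A sketch of the flag round trip, plus the mutator shape for the remover, which, like the deserializer inserts, targets an "OperationSerializer" that only a real client stack provides:

    package main

    import (
        "context"
        "fmt"

        "github.com/aws/smithy-go/middleware"
        smithyhttp "github.com/aws/smithy-go/transport/http"
    )

    func main() {
        ctx := smithyhttp.SetIsContentTypeDefaultValue(context.Background(), true)
        fmt.Println(smithyhttp.GetIsContentTypeDefaultValue(ctx)) // true

        // Handed to a generated client; errors on a stack without a serializer.
        dropDefault := func(stack *middleware.Stack) error {
            return smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack)
        }
        _ = dropDefault
    }
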
-func AddHeaderValue(header string, value string) func(stack *middleware.Stack) error { - return func(stack *middleware.Stack) error { - helper, err := getOrAddHeaderValueHelper(stack) - if err != nil { - return err - } - helper.addHeaderValue(headerValue{header: header, value: value, append: true}) - return nil - } -} - -// SetHeaderValue returns a stack mutator that adds the header value pair to header. -// Replaces any existing values if present. -func SetHeaderValue(header string, value string) func(stack *middleware.Stack) error { - return func(stack *middleware.Stack) error { - helper, err := getOrAddHeaderValueHelper(stack) - if err != nil { - return err - } - helper.addHeaderValue(headerValue{header: header, value: value, append: false}) - return nil - } -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go deleted file mode 100644 index d5909b0a24..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go +++ /dev/null @@ -1,75 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http/httputil" - - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// RequestResponseLogger is a deserialize middleware that will log the request and response HTTP messages and optionally -// their respective bodies. Will not perform any logging if none of the options are set. -type RequestResponseLogger struct { - LogRequest bool - LogRequestWithBody bool - - LogResponse bool - LogResponseWithBody bool -} - -// ID is the middleware identifier. -func (r *RequestResponseLogger) ID() string { - return "RequestResponseLogger" -} - -// HandleDeserialize will log the request and response HTTP messages if configured accordingly. 
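AddHeaderValue and SetHeaderValue share one Build-step helper middleware, so repeated applications accumulate into a single pass over the headers. A sketch (header names and values are illustrative):

    package main

    import (
        "log"

        "github.com/aws/smithy-go/middleware"
        smithyhttp "github.com/aws/smithy-go/transport/http"
    )

    func main() {
        stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

        mutators := []func(*middleware.Stack) error{
            smithyhttp.SetHeaderValue("X-Client", "ko"),      // replace any existing value
            smithyhttp.AddHeaderValue("X-Trace-Tag", "edge"), // append to existing values
        }
        for _, m := range mutators {
            if err := m(stack); err != nil {
                log.Fatal(err)
            }
        }
    }
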
-func (r *RequestResponseLogger) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - logger := middleware.GetLogger(ctx) - - if r.LogRequest || r.LogRequestWithBody { - smithyRequest, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in) - } - - rc := smithyRequest.Build(ctx) - reqBytes, err := httputil.DumpRequestOut(rc, r.LogRequestWithBody) - if err != nil { - return out, metadata, err - } - - logger.Logf(logging.Debug, "Request\n%v", string(reqBytes)) - - if r.LogRequestWithBody { - smithyRequest, err = smithyRequest.SetStream(rc.Body) - if err != nil { - return out, metadata, err - } - in.Request = smithyRequest - } - } - - out, metadata, err = next.HandleDeserialize(ctx, in) - - if (err == nil) && (r.LogResponse || r.LogResponseWithBody) { - smithyResponse, ok := out.RawResponse.(*Response) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse) - } - - respBytes, err := httputil.DumpResponse(smithyResponse.Response, r.LogResponseWithBody) - if err != nil { - return out, metadata, fmt.Errorf("failed to dump response %w", err) - } - - logger.Logf(logging.Debug, "Response\n%v", string(respBytes)) - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go deleted file mode 100644 index d6079b2595..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go +++ /dev/null @@ -1,51 +0,0 @@ -package http - -import ( - "context" - - "github.com/aws/smithy-go/middleware" -) - -type ( - hostnameImmutableKey struct{} - hostPrefixDisableKey struct{} -) - -// GetHostnameImmutable retrieves whether the endpoint hostname should be considered -// immutable or not. -// -// Scoped to stack values. Use middleware#ClearStackValues to clear all stack -// values. -func GetHostnameImmutable(ctx context.Context) (v bool) { - v, _ = middleware.GetStackValue(ctx, hostnameImmutableKey{}).(bool) - return v -} - -// SetHostnameImmutable sets or modifies whether the request's endpoint hostname -// should be considered immutable or not. -// -// Scoped to stack values. Use middleware#ClearStackValues to clear all stack -// values. -func SetHostnameImmutable(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, hostnameImmutableKey{}, value) -} - -// IsEndpointHostPrefixDisabled retrieves whether the hostname prefixing is -// disabled. -// -// Scoped to stack values. Use middleware#ClearStackValues to clear all stack -// values. -func IsEndpointHostPrefixDisabled(ctx context.Context) (v bool) { - v, _ = middleware.GetStackValue(ctx, hostPrefixDisableKey{}).(bool) - return v -} - -// DisableEndpointHostPrefix sets or modifies whether the request's endpoint host -// prefixing should be disabled. If value is true, endpoint host prefixing -// will be disabled. -// -// Scoped to stack values. Use middleware#ClearStackValues to clear all stack -// values. 
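RequestResponseLogger is a plain deserialize middleware, so it can be added to the Deserialize step directly, with no relative insert needed. A sketch enabling message-line logging without bodies:

    package main

    import (
        "log"

        "github.com/aws/smithy-go/middleware"
        smithyhttp "github.com/aws/smithy-go/transport/http"
    )

    func main() {
        stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

        err := stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
            LogRequest:  true, // status/header lines only; the *WithBody fields dump payloads
            LogResponse: true,
        }, middleware.After)
        if err != nil {
            log.Fatal(err)
        }
    }
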
-func DisableEndpointHostPrefix(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, hostPrefixDisableKey{}, value) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go deleted file mode 100644 index 326cb8a6ca..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go +++ /dev/null @@ -1,79 +0,0 @@ -package http - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - "strings" -) - -// MinimumProtocolError is an error type indicating that the established connection did not meet the expected minimum -// HTTP protocol version. -type MinimumProtocolError struct { - proto string - expectedProtoMajor int - expectedProtoMinor int -} - -// Error returns the error message. -func (m *MinimumProtocolError) Error() string { - return fmt.Sprintf("operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", - m.expectedProtoMajor, m.expectedProtoMinor, m.proto) -} - -// RequireMinimumProtocol is a deserialization middleware that asserts that the established HTTP connection -// meets the minimum major and minor version. -type RequireMinimumProtocol struct { - ProtoMajor int - ProtoMinor int -} - -// AddRequireMinimumProtocol adds the RequireMinimumProtocol middleware to the stack using the provided minimum -// protocol major and minor version. -func AddRequireMinimumProtocol(stack *middleware.Stack, major, minor int) error { - return stack.Deserialize.Insert(&RequireMinimumProtocol{ - ProtoMajor: major, - ProtoMinor: minor, - }, "OperationDeserializer", middleware.Before) -} - -// ID returns the middleware identifier string. -func (r *RequireMinimumProtocol) ID() string { - return "RequireMinimumProtocol" -} - -// HandleDeserialize asserts that the established connection is an HTTP connection with the minimum major and minor -// protocol version.
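Two hedged sketches for the APIs being removed here: the stack-scoped context flags from middleware_metadata.go above, and the RequireMinimumProtocol middleware whose helper was just shown:

```go
func configureOperation(ctx context.Context, stack *middleware.Stack) (context.Context, error) {
	// Stack-scoped flags: treat the endpoint hostname as immutable and
	// disable endpoint host prefixing for this operation.
	ctx = smithyhttp.SetHostnameImmutable(ctx, true)
	ctx = smithyhttp.DisableEndpointHostPrefix(ctx, true)

	// Fail deserialization unless the response arrived over HTTP/2.0 or newer.
	return ctx, smithyhttp.AddRequireMinimumProtocol(stack, 2, 0)
}
```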
-func (r *RequireMinimumProtocol) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*Response) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse) - } - - if !strings.HasPrefix(response.Proto, "HTTP") { - return out, metadata, &MinimumProtocolError{ - proto: response.Proto, - expectedProtoMajor: r.ProtoMajor, - expectedProtoMinor: r.ProtoMinor, - } - } - - if response.ProtoMajor < r.ProtoMajor || response.ProtoMinor < r.ProtoMinor { - return out, metadata, &MinimumProtocolError{ - proto: response.Proto, - expectedProtoMajor: r.ProtoMajor, - expectedProtoMinor: r.ProtoMinor, - } - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/properties.go b/vendor/github.com/aws/smithy-go/transport/http/properties.go deleted file mode 100644 index c65aa39320..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/properties.go +++ /dev/null @@ -1,80 +0,0 @@ -package http - -import smithy "github.com/aws/smithy-go" - -type ( - sigV4SigningNameKey struct{} - sigV4SigningRegionKey struct{} - - sigV4ASigningNameKey struct{} - sigV4ASigningRegionsKey struct{} - - isUnsignedPayloadKey struct{} - disableDoubleEncodingKey struct{} -) - -// GetSigV4SigningName gets the signing name from Properties. -func GetSigV4SigningName(p *smithy.Properties) (string, bool) { - v, ok := p.Get(sigV4SigningNameKey{}).(string) - return v, ok -} - -// SetSigV4SigningName sets the signing name on Properties. -func SetSigV4SigningName(p *smithy.Properties, name string) { - p.Set(sigV4SigningNameKey{}, name) -} - -// GetSigV4SigningRegion gets the signing region from Properties. -func GetSigV4SigningRegion(p *smithy.Properties) (string, bool) { - v, ok := p.Get(sigV4SigningRegionKey{}).(string) - return v, ok -} - -// SetSigV4SigningRegion sets the signing region on Properties. -func SetSigV4SigningRegion(p *smithy.Properties, region string) { - p.Set(sigV4SigningRegionKey{}, region) -} - -// GetSigV4ASigningName gets the v4a signing name from Properties. -func GetSigV4ASigningName(p *smithy.Properties) (string, bool) { - v, ok := p.Get(sigV4ASigningNameKey{}).(string) - return v, ok -} - -// SetSigV4ASigningName sets the v4a signing name on Properties. -func SetSigV4ASigningName(p *smithy.Properties, name string) { - p.Set(sigV4ASigningNameKey{}, name) -} - -// GetSigV4ASigningRegions gets the v4a signing region set from Properties. -func GetSigV4ASigningRegions(p *smithy.Properties) ([]string, bool) { - v, ok := p.Get(sigV4ASigningRegionsKey{}).([]string) - return v, ok -} - -// SetSigV4ASigningRegions sets the v4a signing region set on Properties. -func SetSigV4ASigningRegions(p *smithy.Properties, regions []string) { - p.Set(sigV4ASigningRegionsKey{}, regions) -} - -// GetIsUnsignedPayload gets whether the payload is unsigned from Properties. -func GetIsUnsignedPayload(p *smithy.Properties) (bool, bool) { - v, ok := p.Get(isUnsignedPayloadKey{}).(bool) - return v, ok -} - -// SetIsUnsignedPayload sets whether the payload is unsigned on Properties.
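A small sketch of the signing-property accessors above (assuming smithy.Properties' zero value is usable, which it is in current smithy-go; the service name is illustrative):

```go
var props smithy.Properties
smithyhttp.SetSigV4SigningName(&props, "ecr")
smithyhttp.SetSigV4SigningRegion(&props, "us-west-2")

if region, ok := smithyhttp.GetSigV4SigningRegion(&props); ok {
	fmt.Println("signing region:", region) // "us-west-2"
}
```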
-func SetIsUnsignedPayload(p *smithy.Properties, isUnsignedPayload bool) { - p.Set(isUnsignedPayloadKey{}, isUnsignedPayload) -} - -// GetDisableDoubleEncoding gets whether double encoding is disabled from Properties. -func GetDisableDoubleEncoding(p *smithy.Properties) (bool, bool) { - v, ok := p.Get(disableDoubleEncodingKey{}).(bool) - return v, ok -} - -// SetDisableDoubleEncoding sets whether double encoding is disabled on Properties. -func SetDisableDoubleEncoding(p *smithy.Properties, disableDoubleEncoding bool) { - p.Set(disableDoubleEncodingKey{}, disableDoubleEncoding) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go deleted file mode 100644 index 7177d6f957..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/request.go +++ /dev/null @@ -1,189 +0,0 @@ -package http - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - iointernal "github.com/aws/smithy-go/transport/http/internal/io" -) - -// Request provides the HTTP specific request structure for HTTP specific -// middleware steps to use to serialize input, and send an operation's request. -type Request struct { - *http.Request - stream io.Reader - isStreamSeekable bool - streamStartPos int64 -} - -// NewStackRequest returns an initialized request ready to be populated with the -// HTTP request details. Returns empty interface so the function can be used as -// a parameter to the Smithy middleware Stack constructor. -func NewStackRequest() interface{} { - return &Request{ - Request: &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - ContentLength: -1, // default to unknown length - }, - } -} - -// IsHTTPS returns if the request is HTTPS. Returns false if no endpoint URL is set. -func (r *Request) IsHTTPS() bool { - if r.URL == nil { - return false - } - return strings.EqualFold(r.URL.Scheme, "https") -} - -// Clone returns a deep copy of the Request for the new context. A reference to -// the Stream is copied, but the underlying stream is not copied. -func (r *Request) Clone() *Request { - rc := *r - rc.Request = rc.Request.Clone(context.TODO()) - return &rc -} - -// StreamLength returns the number of bytes of the serialized stream attached -// to the request and ok set. If the length cannot be determined, an error will -// be returned. -func (r *Request) StreamLength() (size int64, ok bool, err error) { - return streamLength(r.stream, r.isStreamSeekable, r.streamStartPos) -} - -func streamLength(stream io.Reader, seekable bool, startPos int64) (size int64, ok bool, err error) { - if stream == nil { - return 0, true, nil - } - - if l, ok := stream.(interface{ Len() int }); ok { - return int64(l.Len()), true, nil - } - - if !seekable { - return 0, false, nil - } - - s := stream.(io.Seeker) - endOffset, err := s.Seek(0, io.SeekEnd) - if err != nil { - return 0, false, err - } - - // The reason to seek to streamStartPos instead of 0 is to ensure that the - // SDK only sends the stream from the starting position the user's - // application provided it to the SDK at. For example, an application opens a - // file, and wants to skip the first N bytes uploading the rest. The - // application would move the file's offset N bytes, then hand it off to - // the SDK to send the remaining. The SDK should respect that initial offset.
- _, err = s.Seek(startPos, io.SeekStart) - if err != nil { - return 0, false, err - } - - return endOffset - startPos, true, nil -} - -// RewindStream will rewind the io.Reader to the relative start position if it -// is an io.Seeker. -func (r *Request) RewindStream() error { - // If there is no stream there is nothing to rewind. - if r.stream == nil { - return nil - } - - if !r.isStreamSeekable { - return fmt.Errorf("request stream is not seekable") - } - _, err := r.stream.(io.Seeker).Seek(r.streamStartPos, io.SeekStart) - return err -} - -// GetStream returns the request stream io.Reader if a stream is set. If no -// stream is present nil will be returned. -func (r *Request) GetStream() io.Reader { - return r.stream -} - -// IsStreamSeekable returns whether the stream is seekable. -func (r *Request) IsStreamSeekable() bool { - return r.isStreamSeekable -} - -// SetStream returns a clone of the request with the stream set to the provided -// reader. May return an error if the provided reader is seekable but seeking -// fails. -func (r *Request) SetStream(reader io.Reader) (rc *Request, err error) { - rc = r.Clone() - - if reader == http.NoBody { - reader = nil - } - - var isStreamSeekable bool - var streamStartPos int64 - switch v := reader.(type) { - case io.Seeker: - n, err := v.Seek(0, io.SeekCurrent) - if err != nil { - return r, err - } - isStreamSeekable = true - streamStartPos = n - default: - // If the stream length can be determined, and is determined to be empty, - // use a nil stream to prevent confusion between empty vs not-empty - // streams. - length, ok, err := streamLength(reader, false, 0) - if err != nil { - return nil, err - } else if ok && length == 0 { - reader = nil - } - } - - rc.stream = reader - rc.isStreamSeekable = isStreamSeekable - rc.streamStartPos = streamStartPos - - return rc, err -} - -// Build returns a built standard HTTP request value from the Smithy request. -// The request's stream is wrapped in a safe container that allows it to be -// reused for subsequent attempts. -func (r *Request) Build(ctx context.Context) *http.Request { - req := r.Request.Clone(ctx) - - if r.stream == nil && req.ContentLength == -1 { - req.ContentLength = 0 - } - - switch stream := r.stream.(type) { - case *io.PipeReader: - req.Body = ioutil.NopCloser(stream) - req.ContentLength = -1 - default: - // HTTP Client Request must only have a non-nil body if the - // ContentLength is explicitly unknown (-1) or non-zero. The HTTP - // Client will interpret a non-nil body and ContentLength 0 as - // "unknown". This is unwanted behavior. - if req.ContentLength != 0 && r.stream != nil { - req.Body = iointernal.NewSafeReadCloser(ioutil.NopCloser(stream)) - } - } - - return req -} - -// RequestCloner is a function that can take an input request type and clone the request -// for use in a subsequent retry attempt. -func RequestCloner(v interface{}) interface{} { - return v.(*Request).Clone() -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/response.go b/vendor/github.com/aws/smithy-go/transport/http/response.go deleted file mode 100644 index 0c13bfcc8e..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/response.go +++ /dev/null @@ -1,34 +0,0 @@ -package http - -import ( - "fmt" - "net/http" -) - -// Response provides the HTTP specific response structure for HTTP specific -// middleware steps to use to deserialize the response from an operation call.
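To make the stream semantics above concrete, a hedged sketch of building a request with a seekable body:

```go
req := smithyhttp.NewStackRequest().(*smithyhttp.Request)

// bytes.Reader is seekable and has Len(), so the stream length is known.
req, err := req.SetStream(bytes.NewReader([]byte("hello")))
if err != nil {
	log.Fatal(err)
}

if n, ok, _ := req.StreamLength(); ok {
	fmt.Println("content length:", n) // 5
}

httpReq := req.Build(context.Background()) // *http.Request, body safely wrapped for retries
_ = httpReq
```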
-type Response struct { - *http.Response -} - -// ResponseError provides the HTTP centric error type wrapping the underlying -// error with the HTTP response value. -type ResponseError struct { - Response *Response - Err error -} - -// HTTPStatusCode returns the HTTP response status code received from the service. -func (e *ResponseError) HTTPStatusCode() int { return e.Response.StatusCode } - -// HTTPResponse returns the HTTP response received from the service. -func (e *ResponseError) HTTPResponse() *Response { return e.Response } - -// Unwrap returns the nested error if any, or nil. -func (e *ResponseError) Unwrap() error { return e.Err } - -func (e *ResponseError) Error() string { - return fmt.Sprintf( - "http response error StatusCode: %d, %v", - e.Response.StatusCode, e.Err) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/time.go b/vendor/github.com/aws/smithy-go/transport/http/time.go deleted file mode 100644 index 607b196a8b..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/time.go +++ /dev/null @@ -1,13 +0,0 @@ -package http - -import ( - "time" - - smithytime "github.com/aws/smithy-go/time" -) - -// ParseTime parses a time string like the HTTP Date header. This uses a more -// relaxed rule set for date parsing compared to the standard library. -func ParseTime(text string) (t time.Time, err error) { - return smithytime.ParseHTTPDate(text) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/url.go b/vendor/github.com/aws/smithy-go/transport/http/url.go deleted file mode 100644 index 60a5fc1002..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/url.go +++ /dev/null @@ -1,44 +0,0 @@ -package http - -import "strings" - -// JoinPath returns an absolute URL path composed of the two paths provided. -// Enforces that the returned path begins with '/'. If added path is empty the -// returned path suffix will match the first parameter suffix. -func JoinPath(a, b string) string { - if len(a) == 0 { - a = "/" - } else if a[0] != '/' { - a = "/" + a - } - - if len(b) != 0 && b[0] == '/' { - b = b[1:] - } - - if len(b) != 0 && len(a) > 1 && a[len(a)-1] != '/' { - a = a + "/" - } - - return a + b -} - -// JoinRawQuery returns an absolute raw query expression. Any duplicate '&' -// will be collapsed to a single separator between values. -func JoinRawQuery(a, b string) string { - a = strings.TrimFunc(a, isAmpersand) - b = strings.TrimFunc(b, isAmpersand) - - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - - return a + "&" + b -} - -func isAmpersand(v rune) bool { - return v == '&' -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go deleted file mode 100644 index 71a7e0d8af..0000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go +++ /dev/null @@ -1,37 +0,0 @@ -package http - -import ( - "strings" -) - -// UserAgentBuilder is a builder for an HTTP User-Agent string. -type UserAgentBuilder struct { - sb strings.Builder -} - -// NewUserAgentBuilder returns a new UserAgentBuilder. -func NewUserAgentBuilder() *UserAgentBuilder { - return &UserAgentBuilder{sb: strings.Builder{}} -} - -// AddKey adds the named component/product to the agent string. -func (u *UserAgentBuilder) AddKey(key string) { - u.appendTo(key) -} - -// AddKeyValue adds the named key to the agent string with the given value.
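The URL helpers above are small enough to pin down with expected outputs (derived from the implementations just shown):

```go
smithyhttp.JoinPath("v2", "/tags/list")  // "/v2/tags/list"
smithyhttp.JoinPath("/v2/", "")          // "/v2/" (empty b keeps a's suffix)
smithyhttp.JoinRawQuery("a=1&&", "&b=2") // "a=1&b=2"
```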
-func (u *UserAgentBuilder) AddKeyValue(key, value string) { - u.appendTo(key + "/" + value) -} - -// Build returns the constructed User-Agent string. May be called multiple times. -func (u *UserAgentBuilder) Build() string { - return u.sb.String() -} - -func (u *UserAgentBuilder) appendTo(value string) { - if u.sb.Len() > 0 { - u.sb.WriteRune(' ') - } - u.sb.WriteString(value) -} diff --git a/vendor/github.com/aws/smithy-go/validation.go b/vendor/github.com/aws/smithy-go/validation.go deleted file mode 100644 index b5eedc1f90..0000000000 --- a/vendor/github.com/aws/smithy-go/validation.go +++ /dev/null @@ -1,140 +0,0 @@ -package smithy - -import ( - "bytes" - "fmt" - "strings" -) - -// An InvalidParamsError provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type InvalidParamsError struct { - // Context is the base context of the invalid parameter group. - Context string - errs []InvalidParamError -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *InvalidParamsError) Add(err InvalidParamError) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another InvalidParamsError -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *InvalidParamsError) AddNested(nestedCtx string, nested InvalidParamsError) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e *InvalidParamsError) Len() int { - return len(e.errs) -} - -// Error returns the string formatted form of the invalid parameters. -func (e InvalidParamsError) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%d validation error(s) found.\n", len(e.errs)) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Error()) - } - - return w.String() -} - -// Errs returns a slice of the invalid parameters -func (e InvalidParamsError) Errs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An InvalidParamError represents an invalid parameter error type. -type InvalidParamError interface { - error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type invalidParamError struct { - context string - nestedContext string - field string - reason string -} - -// Error returns the string version of the invalid parameter error. -func (e invalidParamError) Error() string { - return fmt.Sprintf("%s, %s.", e.reason, e.Field()) -} - -// Field Returns the field and context the error occurred. -func (e invalidParamError) Field() string { - sb := &strings.Builder{} - sb.WriteString(e.context) - if sb.Len() > 0 { - if len(e.nestedContext) == 0 || (len(e.nestedContext) > 0 && e.nestedContext[:1] != "[") { - sb.WriteRune('.') - } - } - if len(e.nestedContext) > 0 { - sb.WriteString(e.nestedContext) - sb.WriteRune('.') - } - sb.WriteString(e.field) - return sb.String() -} - -// SetContext updates the base context of the error. 
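A sketch of the UserAgentBuilder just removed; the component name and version are illustrative:

```go
b := smithyhttp.NewUserAgentBuilder()
b.AddKey("ko")
b.AddKeyValue("amazon-ecr-credential-helper", "0.7.0")
fmt.Println(b.Build()) // "ko amazon-ecr-credential-helper/0.7.0"
```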
-func (e *invalidParamError) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. -func (e *invalidParamError) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - return - } - // Check if our nested context is an index into a slice or map - if e.nestedContext[:1] != "[" { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - return - } - e.nestedContext = ctx + e.nestedContext -} - -// A ParamRequiredError represents a required parameter error. -type ParamRequiredError struct { - invalidParamError -} - -// NewErrParamRequired creates a new required parameter error. -func NewErrParamRequired(field string) *ParamRequiredError { - return &ParamRequiredError{ - invalidParamError{ - field: field, - reason: fmt.Sprintf("missing required field"), - }, - } -} diff --git a/vendor/github.com/aws/smithy-go/waiter/logger.go b/vendor/github.com/aws/smithy-go/waiter/logger.go deleted file mode 100644 index 8d70a03ff2..0000000000 --- a/vendor/github.com/aws/smithy-go/waiter/logger.go +++ /dev/null @@ -1,36 +0,0 @@ -package waiter - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// Logger is the Logger middleware used by the waiter to log an attempt -type Logger struct { - // Attempt is the current attempt to be logged - Attempt int64 -} - -// ID representing the Logger middleware -func (*Logger) ID() string { - return "WaiterLogger" -} - -// HandleInitialize performs handling of request in initialize stack step -func (m *Logger) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - logger := middleware.GetLogger(ctx) - - logger.Logf(logging.Debug, fmt.Sprintf("attempting waiter request, attempt count: %d", m.Attempt)) - - return next.HandleInitialize(ctx, in) -} - -// AddLogger is a helper util to add the waiter logger after the `SetLogger` middleware in the stack -func (m Logger) AddLogger(stack *middleware.Stack) error { - return stack.Initialize.Insert(&m, "SetLogger", middleware.After) -} diff --git a/vendor/github.com/aws/smithy-go/waiter/waiter.go b/vendor/github.com/aws/smithy-go/waiter/waiter.go deleted file mode 100644 index 03e46e2ee7..0000000000 --- a/vendor/github.com/aws/smithy-go/waiter/waiter.go +++ /dev/null @@ -1,66 +0,0 @@ -package waiter - -import ( - "fmt" - "math" - "time" - - "github.com/aws/smithy-go/rand" -) - -// ComputeDelay computes delay between waiter attempts. The function takes in a current attempt count, -// minimum delay, maximum delay, and remaining wait time for the waiter as input. The inputs minDelay and maxDelay -// must always be greater than 0, and minDelay must be less than or equal to maxDelay. -// -// Returns the computed delay and if next attempt count is possible within the given input time constraints. -// Note that the zeroth attempt results in no delay.
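Two hedged sketches for this chunk. First, composing validation errors with the smithy types above (context and field names are illustrative):

```go
var errs smithy.InvalidParamsError
errs.Context = "GetAuthorizationTokenInput"
errs.Add(smithy.NewErrParamRequired("RegistryId"))

fmt.Println(errs.Error())
// 1 validation error(s) found.
// - missing required field, GetAuthorizationTokenInput.RegistryId.
```

Second, the waiter delay whose documentation closes this chunk (implementation follows below): for attempt 3 with a 2s floor and 2m ceiling, the exponential target is 2s·2² = 8s, and the result is jittered into [2s, 8s):

```go
delay, err := waiter.ComputeDelay(
	3,             // attempt
	2*time.Second, // minDelay
	2*time.Minute, // maxDelay
	5*time.Minute, // remainingTime
)
```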
-func ComputeDelay(attempt int64, minDelay, maxDelay, remainingTime time.Duration) (delay time.Duration, err error) { - // zeroth attempt, no delay - if attempt <= 0 { - return 0, nil - } - - // remainingTime is zero or less, no delay - if remainingTime <= 0 { - return 0, nil - } - - // validate min delay is greater than 0 - if minDelay == 0 { - return 0, fmt.Errorf("minDelay must be greater than zero when computing Delay") - } - - // validate max delay is greater than 0 - if maxDelay == 0 { - return 0, fmt.Errorf("maxDelay must be greater than zero when computing Delay") - } - - // Get attempt ceiling to prevent integer overflow. - attemptCeiling := (math.Log(float64(maxDelay/minDelay)) / math.Log(2)) + 1 - - if attempt > int64(attemptCeiling) { - delay = maxDelay - } else { - // Compute exponential delay based on attempt. - ri := 1 << uint64(attempt-1) - // compute delay - delay = minDelay * time.Duration(ri) - } - - if delay != minDelay { - // randomize to get jitter between min delay and delay value - d, err := rand.CryptoRandInt63n(int64(delay - minDelay)) - if err != nil { - return 0, fmt.Errorf("error computing retry jitter, %w", err) - } - - delay = time.Duration(d) + minDelay - } - - // check if this is the last attempt possible and compute delay accordingly - if remainingTime-delay <= minDelay { - delay = remainingTime - minDelay - } - - return delay, nil -} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/LICENSE b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/LICENSE deleted file mode 100644 index bb3f49d69b..0000000000 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/LICENSE +++ /dev/null @@ -1,51 +0,0 @@ -Apache License - -Version 2.0, January 2004 - -http://www.apache.org/licenses/ -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -1. Definitions. -"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. -"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. -"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. -"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. -"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. -"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. -"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
-"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. -"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. -2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. -3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. -4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: -1. You must give any other recipients of the Work or Derivative Works a copy of this License; and -2. You must cause any modified files to carry prominent notices stating that You changed the files; and -3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and -4. 
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. - -You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. -5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. -6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. -7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. -8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. -9. Accepting Warranty or Additional Liability. 
While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. -END OF TERMS AND CONDITIONS -APPENDIX: How to apply the Apache License to your work -To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*** - -Note: Other license terms may apply to certain, identified software files contained within or distributed with the accompanying software if such terms are included in the directory containing the accompanying software. Such other license terms will then apply in lieu of the terms of the software license above. diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/client.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/client.go deleted file mode 100644 index eb49fc87b4..0000000000 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/client.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
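Before the client implementation below, a hedged usage sketch of its ExtractRegistry helper, which parses private and public ECR endpoints (account ID and region here are illustrative):

```go
reg, err := api.ExtractRegistry("https://123456789012.dkr.ecr.us-west-2.amazonaws.com")
// err == nil
// reg.Service == api.ServiceECR, reg.ID == "123456789012",
// reg.Region == "us-west-2", reg.FIPS == false

pub, _ := api.ExtractRegistry("public.ecr.aws")
// pub.Service == api.ServiceECRPublic
```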
- -package api - -import ( - "context" - "encoding/base64" - "fmt" - "net/url" - "regexp" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/ecr" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic" - "github.com/sirupsen/logrus" - - "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache" -) - -const ( - proxyEndpointScheme = "https://" - programName = "docker-credential-ecr-login" - ecrPublicName = "public.ecr.aws" - ecrPublicEndpoint = proxyEndpointScheme + ecrPublicName -) - -var ecrPattern = regexp.MustCompile(`^(\d{12})\.dkr\.ecr(\-fips)?\.([a-zA-Z0-9][a-zA-Z0-9-_]*)\.(amazonaws\.com(\.cn)?|sc2s\.sgov\.gov|c2s\.ic\.gov)$`) - -type Service string - -const ( - ServiceECR Service = "ecr" - ServiceECRPublic Service = "ecr-public" -) - -// Registry in ECR -type Registry struct { - Service Service - ID string - FIPS bool - Region string -} - -// ExtractRegistry returns the ECR registry behind a given service endpoint -func ExtractRegistry(input string) (*Registry, error) { - if strings.HasPrefix(input, proxyEndpointScheme) { - input = strings.TrimPrefix(input, proxyEndpointScheme) - } - serverURL, err := url.Parse(proxyEndpointScheme + input) - if err != nil { - return nil, err - } - if serverURL.Hostname() == ecrPublicName { - return &Registry{ - Service: ServiceECRPublic, - }, nil - } - matches := ecrPattern.FindStringSubmatch(serverURL.Hostname()) - if len(matches) == 0 { - return nil, fmt.Errorf(programName + " can only be used with Amazon Elastic Container Registry.") - } else if len(matches) < 3 { - return nil, fmt.Errorf("%q is not a valid repository URI for Amazon Elastic Container Registry.", input) - } - return &Registry{ - Service: ServiceECR, - ID: matches[1], - FIPS: matches[2] == "-fips", - Region: matches[3], - }, nil -} - -// Client used for calling ECR service -type Client interface { - GetCredentials(serverURL string) (*Auth, error) - GetCredentialsByRegistryID(registryID string) (*Auth, error) - ListCredentials() ([]*Auth, error) -} - -// Auth credentials returned by ECR service to allow docker login -type Auth struct { - ProxyEndpoint string - Username string - Password string -} - -type defaultClient struct { - ecrClient ECRAPI - ecrPublicClient ECRPublicAPI - credentialCache cache.CredentialsCache -} - -type ECRAPI interface { - GetAuthorizationToken(context.Context, *ecr.GetAuthorizationTokenInput, ...func(*ecr.Options)) (*ecr.GetAuthorizationTokenOutput, error) -} - -type ECRPublicAPI interface { - GetAuthorizationToken(context.Context, *ecrpublic.GetAuthorizationTokenInput, ...func(*ecrpublic.Options)) (*ecrpublic.GetAuthorizationTokenOutput, error) -} - -// GetCredentials returns username, password, and proxyEndpoint -func (c *defaultClient) GetCredentials(serverURL string) (*Auth, error) { - registry, err := ExtractRegistry(serverURL) - if err != nil { - return nil, err - } - logrus. - WithField("service", registry.Service). - WithField("registry", registry.ID). - WithField("region", registry.Region). - WithField("serverURL", serverURL). 
- Debug("Retrieving credentials") - switch registry.Service { - case ServiceECR: - return c.GetCredentialsByRegistryID(registry.ID) - case ServiceECRPublic: - return c.GetPublicCredentials() - } - return nil, fmt.Errorf("unknown service %q", registry.Service) -} - -// GetCredentialsByRegistryID returns username, password, and proxyEndpoint -func (c *defaultClient) GetCredentialsByRegistryID(registryID string) (*Auth, error) { - cachedEntry := c.credentialCache.Get(registryID) - if cachedEntry != nil { - if cachedEntry.IsValid(time.Now()) { - logrus.WithField("registry", registryID).Debug("Using cached token") - return extractToken(cachedEntry.AuthorizationToken, cachedEntry.ProxyEndpoint) - } - logrus. - WithField("requestedAt", cachedEntry.RequestedAt). - WithField("expiresAt", cachedEntry.ExpiresAt). - Debug("Cached token is no longer valid") - } - - auth, err := c.getAuthorizationToken(registryID) - - // if we have a cached token, fall back to avoid failing the request. This may result an expired token - // being returned, but if there is a 500 or timeout from the service side, we'd like to attempt to re-use an - // old token. We invalidate tokens prior to their expiration date to help mitigate this scenario. - if err != nil && cachedEntry != nil { - logrus.WithError(err).Info("Got error fetching authorization token. Falling back to cached token.") - return extractToken(cachedEntry.AuthorizationToken, cachedEntry.ProxyEndpoint) - } - return auth, err -} - -func (c *defaultClient) GetPublicCredentials() (*Auth, error) { - cachedEntry := c.credentialCache.GetPublic() - if cachedEntry != nil { - if cachedEntry.IsValid(time.Now()) { - logrus.WithField("registry", ecrPublicName).Debug("Using cached token") - return extractToken(cachedEntry.AuthorizationToken, cachedEntry.ProxyEndpoint) - } - logrus. - WithField("requestedAt", cachedEntry.RequestedAt). - WithField("expiresAt", cachedEntry.ExpiresAt). - Debug("Cached token is no longer valid") - } - - auth, err := c.getPublicAuthorizationToken() - // if we have a cached token, fall back to avoid failing the request. This may result an expired token - // being returned, but if there is a 500 or timeout from the service side, we'd like to attempt to re-use an - // old token. We invalidate tokens prior to their expiration date to help mitigate this scenario. - if err != nil && cachedEntry != nil { - logrus.WithError(err).Info("Got error fetching authorization token. 
Falling back to cached token.") - return extractToken(cachedEntry.AuthorizationToken, cachedEntry.ProxyEndpoint) - } - return auth, err -} - -func (c *defaultClient) ListCredentials() ([]*Auth, error) { - // prime the cache with default authorization tokens - _, err := c.GetCredentialsByRegistryID("") - if err != nil { - logrus.WithError(err).Debug("couldn't get authorization token for default registry") - } - _, err = c.GetPublicCredentials() - if err != nil { - logrus.WithError(err).Debug("couldn't get authorization token for public registry") - } - - auths := make([]*Auth, 0) - for _, authEntry := range c.credentialCache.List() { - auth, err := extractToken(authEntry.AuthorizationToken, authEntry.ProxyEndpoint) - if err != nil { - logrus.WithError(err).Debug("Could not extract token") - } else { - auths = append(auths, auth) - } - } - - return auths, nil -} - -func (c *defaultClient) getAuthorizationToken(registryID string) (*Auth, error) { - var input *ecr.GetAuthorizationTokenInput - if registryID == "" { - logrus.Debug("Calling ECR.GetAuthorizationToken for default registry") - input = &ecr.GetAuthorizationTokenInput{} - } else { - logrus.WithField("registry", registryID).Debug("Calling ECR.GetAuthorizationToken") - input = &ecr.GetAuthorizationTokenInput{ - RegistryIds: []string{registryID}, - } - } - - output, err := c.ecrClient.GetAuthorizationToken(context.TODO(), input) - if err != nil || output == nil { - if err == nil { - if registryID == "" { - err = fmt.Errorf("missing AuthorizationData in ECR response for default registry") - } else { - err = fmt.Errorf("missing AuthorizationData in ECR response for %s", registryID) - } - } - return nil, fmt.Errorf("ecr: Failed to get authorization token: %w", err) - } - - for _, authData := range output.AuthorizationData { - if authData.ProxyEndpoint != nil && authData.AuthorizationToken != nil { - authEntry := cache.AuthEntry{ - AuthorizationToken: aws.ToString(authData.AuthorizationToken), - RequestedAt: time.Now(), - ExpiresAt: aws.ToTime(authData.ExpiresAt), - ProxyEndpoint: aws.ToString(authData.ProxyEndpoint), - Service: cache.ServiceECR, - } - registry, err := ExtractRegistry(authEntry.ProxyEndpoint) - if err != nil { - return nil, fmt.Errorf("Invalid ProxyEndpoint returned by ECR: %s", authEntry.ProxyEndpoint) - } - auth, err := extractToken(authEntry.AuthorizationToken, authEntry.ProxyEndpoint) - if err != nil { - return nil, err - } - c.credentialCache.Set(registry.ID, &authEntry) - return auth, nil - } - } - if registryID == "" { - return nil, fmt.Errorf("No AuthorizationToken found for default registry") - } - return nil, fmt.Errorf("No AuthorizationToken found for %s", registryID) -} - -func (c *defaultClient) getPublicAuthorizationToken() (*Auth, error) { - var input *ecrpublic.GetAuthorizationTokenInput - - output, err := c.ecrPublicClient.GetAuthorizationToken(context.TODO(), input) - if err != nil { - return nil, fmt.Errorf("ecr: failed to get authorization token: %w", err) - } - if output == nil || output.AuthorizationData == nil { - return nil, fmt.Errorf("ecr: missing AuthorizationData in ECR Public response") - } - authData := output.AuthorizationData - token, err := extractToken(aws.ToString(authData.AuthorizationToken), ecrPublicEndpoint) - if err != nil { - return nil, err - } - authEntry := cache.AuthEntry{ - AuthorizationToken: aws.ToString(authData.AuthorizationToken), - RequestedAt: time.Now(), - ExpiresAt: aws.ToTime(authData.ExpiresAt), - ProxyEndpoint: ecrPublicEndpoint, - Service: cache.ServiceECRPublic, - } 
- c.credentialCache.Set(ecrPublicName, &authEntry) - return token, nil -} - -func extractToken(token string, proxyEndpoint string) (*Auth, error) { - decodedToken, err := base64.StdEncoding.DecodeString(token) - if err != nil { - return nil, fmt.Errorf("invalid token: %w", err) - } - - parts := strings.SplitN(string(decodedToken), ":", 2) - if len(parts) < 2 { - return nil, fmt.Errorf("invalid token: expected two parts, got %d", len(parts)) - } - - return &Auth{ - Username: parts[0], - Password: parts[1], - ProxyEndpoint: proxyEndpoint, - }, nil -} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/factory.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/factory.go deleted file mode 100644 index 110a6c3bdd..0000000000 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/factory.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package api - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/ecr" - "github.com/aws/aws-sdk-go-v2/service/ecrpublic" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/transport/http" - "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache" - "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/version" -) - -// Options makes the constructors more configurable -type Options struct { - Config aws.Config - CacheDir string -} - -// ClientFactory is a factory for creating clients to interact with ECR -type ClientFactory interface { - NewClient(awsConfig aws.Config) Client - NewClientWithOptions(opts Options) Client - NewClientFromRegion(region string) Client - NewClientWithFipsEndpoint(region string) (Client, error) - NewClientWithDefaults() Client -} - -// DefaultClientFactory is a default implementation of the ClientFactory -type DefaultClientFactory struct{} - -var userAgentLoadOption = config.WithAPIOptions([]func(*middleware.Stack) error{ - http.AddHeaderValue("User-Agent", "amazon-ecr-credential-helper/"+version.Version), -}) - -// NewClientWithDefaults creates the client and defaults region -func (defaultClientFactory DefaultClientFactory) NewClientWithDefaults() Client { - awsConfig, err := config.LoadDefaultConfig(context.TODO(), userAgentLoadOption) - if err != nil { - panic(err) - } - - return defaultClientFactory.NewClientWithOptions(Options{Config: awsConfig}) -} - -// NewClientWithFipsEndpoint overrides the default ECR service endpoint in a given region to use the FIPS endpoint -func (defaultClientFactory DefaultClientFactory) NewClientWithFipsEndpoint(region string) (Client, error) { - awsConfig, err := config.LoadDefaultConfig( - context.TODO(), - userAgentLoadOption, - config.WithRegion(region), - config.WithEndpointDiscovery(aws.EndpointDiscoveryEnabled), - ) - if err != nil { - return nil, err - } - - 
return defaultClientFactory.NewClientWithOptions(Options{Config: awsConfig}), nil -} - -// NewClientFromRegion uses the region to create the client -func (defaultClientFactory DefaultClientFactory) NewClientFromRegion(region string) Client { - awsConfig, err := config.LoadDefaultConfig( - context.TODO(), - userAgentLoadOption, - config.WithRegion(region), - ) - if err != nil { - panic(err) - } - - return defaultClientFactory.NewClientWithOptions(Options{ - Config: awsConfig, - }) -} - -// NewClient creates a new client with the provided AWS Config -func (defaultClientFactory DefaultClientFactory) NewClient(awsConfig aws.Config) Client { - return defaultClientFactory.NewClientWithOptions(Options{Config: awsConfig}) -} - -// NewClientWithOptions creates a new client with the provided Options -func (defaultClientFactory DefaultClientFactory) NewClientWithOptions(opts Options) Client { - // The ECR Public API is only available in us-east-1 today - publicConfig := opts.Config.Copy() - publicConfig.Region = "us-east-1" - return &defaultClient{ - ecrClient: ecr.NewFromConfig(opts.Config), - ecrPublicClient: ecrpublic.NewFromConfig(publicConfig), - credentialCache: cache.BuildCredentialsCache(opts.Config, opts.CacheDir), - } -} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/build.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/build.go deleted file mode 100644 index 8aa21287dc..0000000000 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/build.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cache - -import ( - "context" - "crypto/md5" - "encoding/base64" - "fmt" - "os" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/mitchellh/go-homedir" - "github.com/sirupsen/logrus" - - ecrconfig "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config" -) - -func BuildCredentialsCache(config aws.Config, cacheDir string) CredentialsCache { - if os.Getenv("AWS_ECR_DISABLE_CACHE") != "" { - logrus.Debug("Cache disabled due to AWS_ECR_DISABLE_CACHE") - return NewNullCredentialsCache() - } - - if cacheDir == "" { - // Get cacheDir from env var "AWS_ECR_CACHE_DIR" or set to default - cacheDir = ecrconfig.GetCacheDir() - } - - cacheDir, err := homedir.Expand(cacheDir) - if err != nil { - logrus.WithError(err).Debug("Could not expand cache path, disabling cache") - return NewNullCredentialsCache() - } - - cacheFilename := "cache.json" - - credentials, err := config.Credentials.Retrieve(context.TODO()) - if err != nil { - logrus.WithError(err).Debug("Could not fetch credentials for cache prefix, disabling cache") - return NewNullCredentialsCache() - } - - return NewFileCredentialsCache(cacheDir, cacheFilename, credentialsCachePrefix(config.Region, credentials), credentialsPublicCacheKey(credentials)) -} - -// Determine a key prefix for a credentials cache.
Because auth tokens are scoped to an account and region, rely on provided -// region, as well as hash of the access key. -func credentialsCachePrefix(region string, credentials aws.Credentials) string { - return fmt.Sprintf("%s-%s-", region, checksum(credentials.AccessKeyID)) -} - -func credentialsPublicCacheKey(credentials aws.Credentials) string { - return fmt.Sprintf("%s-%s", ServiceECRPublic, checksum(credentials.AccessKeyID)) -} - -// Base64 encodes an MD5 checksum. Relied on for uniqueness, and not for cryptographic security. -func checksum(text string) string { - hasher := md5.New() - data := hasher.Sum([]byte(text)) - return base64.StdEncoding.EncodeToString(data) -} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/credentials.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/credentials.go deleted file mode 100644 index 0b74c0f310..0000000000 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/credentials.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cache - -import ( - "time" -) - -type CredentialsCache interface { - Get(registry string) *AuthEntry - GetPublic() *AuthEntry - Set(registry string, entry *AuthEntry) - List() []*AuthEntry - Clear() -} - -type Service string - -const ( - ServiceECR Service = "ecr" - ServiceECRPublic Service = "ecr-public" -) - -type AuthEntry struct { - AuthorizationToken string - RequestedAt time.Time - ExpiresAt time.Time - ProxyEndpoint string - Service Service -} - -// IsValid checks if AuthEntry is still valid at testTime. AuthEntries expire at 1/2 of their original -// requested window. -func (authEntry *AuthEntry) IsValid(testTime time.Time) bool { - validWindow := authEntry.ExpiresAt.Sub(authEntry.RequestedAt) - refreshTime := authEntry.ExpiresAt.Add(-1 * validWindow / time.Duration(2)) - return testTime.Before(refreshTime) -} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/file.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/file.go deleted file mode 100644 index 19dc33bb5e..0000000000 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/file.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
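The half-window refresh rule in IsValid above, made concrete (a sketch):

```go
now := time.Now()
entry := cache.AuthEntry{
	RequestedAt: now,
	ExpiresAt:   now.Add(12 * time.Hour), // refresh point is now + 6h
}
entry.IsValid(now.Add(5 * time.Hour)) // true: before half the window
entry.IsValid(now.Add(7 * time.Hour)) // false: treated as expired early
```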
- -package cache - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - - "github.com/sirupsen/logrus" -) - -const registryCacheVersion = "1.0" - -type RegistryCache struct { - Registries map[string]*AuthEntry - Version string -} - -type fileCredentialCache struct { - path string - filename string - cachePrefixKey string - publicCacheKey string -} - -func newRegistryCache() *RegistryCache { - return &RegistryCache{ - Registries: make(map[string]*AuthEntry), - Version: registryCacheVersion, - } -} - -// NewFileCredentialsCache returns a new file credentials cache. -// -// path is used for temporary files during save, and filename should be a relative filename -// in the same directory where the cache is serialized and deserialized. -// -// cachePrefixKey is used for scoping credentials for a given credential cache (i.e. region and -// accessKey). -func NewFileCredentialsCache(path string, filename string, cachePrefixKey string, publicCacheKey string) CredentialsCache { - if _, err := os.Stat(path); err != nil { - os.MkdirAll(path, 0700) - } - return &fileCredentialCache{ - path: path, - filename: filename, - cachePrefixKey: cachePrefixKey, - publicCacheKey: publicCacheKey, - } -} - -func (f *fileCredentialCache) Get(registry string) *AuthEntry { - logrus.WithField("registry", registry).Debug("Checking file cache") - registryCache := f.init() - return registryCache.Registries[f.cachePrefixKey+registry] -} - -func (f *fileCredentialCache) GetPublic() *AuthEntry { - logrus.Debug("Checking file cache for ECR Public") - registryCache := f.init() - return registryCache.Registries[f.publicCacheKey] -} - -func (f *fileCredentialCache) Set(registry string, entry *AuthEntry) { - logrus. - WithField("registry", registry). - WithField("service", entry.Service). - Debug("Saving credentials to file cache") - registryCache := f.init() - - key := f.cachePrefixKey + registry - if entry.Service == ServiceECRPublic { - key = f.publicCacheKey - } - registryCache.Registries[key] = entry - - err := f.save(registryCache) - if err != nil { - logrus.WithError(err).Info("Could not save cache") - } -} - -// List returns all of the available AuthEntries (regardless of prefix) -func (f *fileCredentialCache) List() []*AuthEntry { - registryCache := f.init() - - // optimize allocation for copy - entries := make([]*AuthEntry, 0, len(registryCache.Registries)) - - for _, entry := range registryCache.Registries { - entries = append(entries, entry) - } - - return entries -} - -func (f *fileCredentialCache) Clear() { - err := os.Remove(f.fullFilePath()) - if err != nil { - logrus.WithError(err).Info("Could not clear cache") - } -} - -func (f *fileCredentialCache) fullFilePath() string { - return filepath.Join(f.path, f.filename) -} - -// Saves credential cache to disk. This writes to a temporary file first, then moves the file to the config location. -// This prevents readers from observing partially written credential files, and reduces (but does not eliminate) concurrent -// file access. There is no guarantee here for handling multiple writes at once since there is no out-of-process locking.
-func (f *fileCredentialCache) save(registryCache *RegistryCache) error { - file, err := os.CreateTemp(f.path, ".config.json.tmp") - if err != nil { - return err - } - - buff, err := json.MarshalIndent(registryCache, "", " ") - if err != nil { - file.Close() - os.Remove(file.Name()) - return err - } - - _, err = file.Write(buff) - - if err != nil { - file.Close() - os.Remove(file.Name()) - return err - } - - file.Close() - // note this is only atomic when relying on linux syscalls - os.Rename(file.Name(), f.fullFilePath()) - return err -} - -func (f *fileCredentialCache) init() *RegistryCache { - registryCache, err := f.load() - if err != nil { - logrus.WithError(err).Info("Could not load existing cache") - f.Clear() - registryCache = newRegistryCache() - } - return registryCache -} - -// Loading a cache from disk will return errors for malformed or incompatible cache files. -func (f *fileCredentialCache) load() (*RegistryCache, error) { - registryCache := newRegistryCache() - - file, err := os.Open(f.fullFilePath()) - if os.IsNotExist(err) { - return registryCache, nil - } - - if err != nil { - return nil, err - } - - defer file.Close() - - if err = json.NewDecoder(file).Decode(®istryCache); err != nil { - return nil, err - } - - if registryCache.Version != registryCacheVersion { - return nil, fmt.Errorf("ecr: Registry cache version %#v is not compatible with %#v, ignoring existing cache", - registryCache.Version, - registryCacheVersion) - } - - // migrate entries - for key := range registryCache.Registries { - if registryCache.Registries[key].Service == "" { - registryCache.Registries[key].Service = ServiceECR - } - } - - return registryCache, nil -} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/null.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/null.go deleted file mode 100644 index 64a7212bd3..0000000000 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/null.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cache - -type nullCredentialsCache struct{} - -func NewNullCredentialsCache() CredentialsCache { - return &nullCredentialsCache{} -} - -func (n *nullCredentialsCache) Get(_ string) *AuthEntry { - return nil -} - -func (n *nullCredentialsCache) GetPublic() *AuthEntry { - return nil -} - -func (n *nullCredentialsCache) Set(_ string, _ *AuthEntry) { -} - -func (n *nullCredentialsCache) List() []*AuthEntry { - return []*AuthEntry{} -} - -func (n *nullCredentialsCache) Clear() { -} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/cache_dir.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/cache_dir.go deleted file mode 100644 index d7c2ee6446..0000000000 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/cache_dir.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2016 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package config - -import "os" - -func GetCacheDir() string { - if cacheDir := os.Getenv("AWS_ECR_CACHE_DIR"); cacheDir != "" { - return cacheDir - } - return "~/.ecr" -} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/log.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/log.go deleted file mode 100644 index 771611bad2..0000000000 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/log.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package config - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/mitchellh/go-homedir" - "github.com/sirupsen/logrus" -) - -func SetupLogger() { - logrusConfig() -} - -func logrusConfig() { - logdir, err := homedir.Expand(GetCacheDir() + "/log") - if err != nil { - fmt.Fprintf(os.Stderr, "log: failed to find directory: %v", err) - logdir = os.TempDir() - } - // Clean the path to replace with OS-specific separators - logdir = filepath.Clean(logdir) - err = os.MkdirAll(logdir, os.ModeDir|0700) - if err != nil { - fmt.Fprintf(os.Stderr, "log: failed to create directory: %v", err) - logdir = os.TempDir() - } - file, err := os.OpenFile(filepath.Join(logdir, "ecr-login.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) - if err != nil { - return - } - logrus.SetLevel(logrus.DebugLevel) - logrus.SetOutput(file) -} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/ecr.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/ecr.go deleted file mode 100644 index 461e3090a3..0000000000 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/ecr.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
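The file cache's save method shown earlier avoids readers observing partially written JSON by writing to a temporary file in the same directory and then renaming it over the target; on POSIX filesystems a rename within one volume is atomic, which is why the temp file must live next to the destination. A minimal sketch of that pattern (a hypothetical helper, not the upstream code):

package main

import (
	"encoding/json"
	"os"
	"path/filepath"
)

// writeFileAtomic marshals v to JSON and swaps it into place with a rename,
// so concurrent readers only ever see the old file or the complete new one.
func writeFileAtomic(path string, v any) error {
	dir := filepath.Dir(path)
	tmp, err := os.CreateTemp(dir, ".cache-*.tmp") // same dir keeps the rename on one filesystem
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // harmless no-op after a successful rename

	enc := json.NewEncoder(tmp)
	enc.SetIndent("", "  ")
	if err := enc.Encode(v); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	_ = writeFileAtomic("/tmp/registry-cache.json", map[string]string{"version": "1.0"})
}

As the upstream comment notes, this removes torn reads but is not a substitute for cross-process locking: two concurrent writers still race on the final rename, and the last one wins.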
- -package ecr - -import ( - "errors" - "fmt" - "io" - - "github.com/sirupsen/logrus" - - "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api" - "github.com/docker/docker-credential-helpers/credentials" -) - -var notImplemented = errors.New("not implemented") - -type ECRHelper struct { - clientFactory api.ClientFactory - logger *logrus.Logger -} - -type Option func(*ECRHelper) - -// WithClientFactory sets the ClientFactory used to make API requests. -func WithClientFactory(clientFactory api.ClientFactory) Option { - return func(e *ECRHelper) { - e.clientFactory = clientFactory - } -} - -// WithLogger sets a new logger instance that writes to the given writer, -// instead of the default writer which writes to stderr. -// -// This can be useful if callers want to redirect logging emitted by this tool -// to another location. -func WithLogger(w io.Writer) Option { - return func(e *ECRHelper) { - logger := logrus.New() - logger.Out = w - e.logger = logger - } -} - -// NewECRHelper returns a new ECRHelper with the given options to override -// default behavior. -func NewECRHelper(opts ...Option) *ECRHelper { - e := &ECRHelper{ - clientFactory: api.DefaultClientFactory{}, - logger: logrus.StandardLogger(), - } - for _, o := range opts { - o(e) - } - - return e -} - -// ensure ECRHelper adheres to the credentials.Helper interface -var _ credentials.Helper = (*ECRHelper)(nil) - -func (ECRHelper) Add(creds *credentials.Credentials) error { - // This does not seem to get called - return notImplemented -} - -func (ECRHelper) Delete(serverURL string) error { - // This does not seem to get called - return notImplemented -} - -func (self ECRHelper) Get(serverURL string) (string, string, error) { - registry, err := api.ExtractRegistry(serverURL) - if err != nil { - self.logger. - WithError(err). - WithField("serverURL", serverURL). 
- Error("Error parsing the serverURL") - return "", "", credentials.NewErrCredentialsNotFound() - } - - var client api.Client - if registry.FIPS { - client, err = self.clientFactory.NewClientWithFipsEndpoint(registry.Region) - if err != nil { - self.logger.WithError(err).Error("Error resolving FIPS endpoint") - return "", "", credentials.NewErrCredentialsNotFound() - } - } else { - client = self.clientFactory.NewClientFromRegion(registry.Region) - } - - auth, err := client.GetCredentials(serverURL) - if err != nil { - self.logger.WithError(err).Error("Error retrieving credentials") - return "", "", credentials.NewErrCredentialsNotFound() - } - return auth.Username, auth.Password, nil -} - -func (self ECRHelper) List() (map[string]string, error) { - self.logger.Debug("Listing credentials") - client := self.clientFactory.NewClientWithDefaults() - - auths, err := client.ListCredentials() - if err != nil { - self.logger.WithError(err).Error("Error listing credentials") - return nil, fmt.Errorf("ecr: could not list credentials: %v", err) - } - - result := map[string]string{} - - for _, auth := range auths { - serverURL := auth.ProxyEndpoint - result[serverURL] = auth.Username - } - return result, nil -} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/version/version.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/version/version.go deleted file mode 100644 index a02a670aa6..0000000000 --- a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/version/version.go +++ /dev/null @@ -1,7 +0,0 @@ -package version - -// Version indicates which version of the binary is running. -var Version = "development" - -// GitCommitSHA indicates which git shorthash the binary was built off of -var GitCommitSHA string diff --git a/vendor/github.com/chrismellard/docker-credential-acr-env/LICENSE b/vendor/github.com/chrismellard/docker-credential-acr-env/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/chrismellard/docker-credential-acr-env/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/credhelper/helper.go b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/credhelper/helper.go deleted file mode 100644 index 2a7cbdf07f..0000000000 --- a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/credhelper/helper.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright © 2020 Chris Mellard chris.mellard@icloud.com - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package credhelper - -import ( - "errors" - "fmt" - "net/url" - "regexp" - - "github.com/Azure/go-autorest/autorest/azure/auth" - "github.com/chrismellard/docker-credential-acr-env/pkg/registry" - "github.com/chrismellard/docker-credential-acr-env/pkg/token" - "github.com/docker/docker-credential-helpers/credentials" -) - -var acrRE = regexp.MustCompile(`.*\.azurecr\.io|.*\.azurecr\.cn|.*\.azurecr\.de|.*\.azurecr\.us`) - -const ( - mcrHostname = "mcr.microsoft.com" - tokenUsername = "" -) - -type ACRCredHelper struct { -} - -func NewACRCredentialsHelper() credentials.Helper { - return &ACRCredHelper{} -} - -func (a ACRCredHelper) Add(_ *credentials.Credentials) error { - return errors.New("list is unimplemented") -} - -func (a ACRCredHelper) Delete(_ string) error { - return errors.New("list is unimplemented") -} - -func isACRRegistry(input string) bool { - serverURL, err := url.Parse("https://" + input) - if err != nil { - return false - } - if serverURL.Hostname() == mcrHostname { - return true - } - matches := acrRE.FindStringSubmatch(serverURL.Hostname()) - if len(matches) == 0 { - return false - } - return true -} - -func (a ACRCredHelper) Get(serverURL string) (string, string, error) { - if !isACRRegistry(serverURL) { - return "", "", errors.New("serverURL does not refer to Azure Container Registry") - } - - spToken, settings, err := token.GetServicePrincipalTokenFromEnvironment() - if err != nil { - return "", "", fmt.Errorf("failed to acquire sp token %w", err) - } - refreshToken, err := registry.GetRegistryRefreshTokenFromAADExchange(serverURL, spToken, settings.Values[auth.TenantID]) - if err != nil { - return "", "", fmt.Errorf("failed to acquire refresh token %w", err) - } - return tokenUsername, refreshToken, nil -} - -func (a ACRCredHelper) List() (map[string]string, error) { - return nil, errors.New("list is unimplemented") -} diff --git a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/registry/const.go b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/registry/const.go deleted file mode 100644 index 2f8ad5a0a5..0000000000 --- a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/registry/const.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright © 2020 Chris Mellard chris.mellard@icloud.com - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package registry - -import ( - "time" -) - -const ( - secureScheme = "https://" - defaultTimeOut = time.Duration(30) * time.Second -) diff --git a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/registry/registry.go b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/registry/registry.go deleted file mode 100644 index c1b30dde8f..0000000000 --- a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/registry/registry.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright © 2020 Chris Mellard chris.mellard@icloud.com - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package registry - -import ( - "context" - "fmt" - "net/url" - - "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/adal" -) - -// GetRegistryRefreshTokenFromAADExchange retrieves an OAuth2 refresh token for the registry specified by serverURL -func GetRegistryRefreshTokenFromAADExchange(serverURL string, principalToken *adal.ServicePrincipalToken, tenantID string) (string, error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeOut) - defer cancel() - - // If refreshing fails, don't try again, just fail. - principalToken.MaxMSIRefreshAttempts = 1 - - if err := principalToken.EnsureFreshWithContext(ctx); err != nil { - return "", fmt.Errorf("error refreshing sp token - %w", err) - } - - registryName, err := getRegistryURL(serverURL) - if err != nil { - return "", fmt.Errorf("failed to parse server URL - %w", err) - } - refreshTokenClient := containerregistry.NewRefreshTokensClient(registryName.String()) - authorizer := autorest.NewBearerAuthorizer(principalToken) - refreshTokenClient.Authorizer = authorizer - rt, err := refreshTokenClient.GetFromExchange(ctx, "access_token", serverURL, tenantID, "", principalToken.Token().AccessToken) - if err != nil { - return "", fmt.Errorf("failed to get refresh token for container registry - %w", err) - } - - return *rt.RefreshToken, nil -} - -// parseRegistryName parses a serverURL and returns the registry name (i.e. minus transport scheme) -func getRegistryURL(serverURL string) (*url.URL, error) { - sURL, err := url.Parse(secureScheme + serverURL) - if err != nil { - return &url.URL{}, fmt.Errorf("failed to parse server URL - %w", err) - } - - return sURL, nil -} diff --git a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/token/token.go b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/token/token.go deleted file mode 100644 index feff3e1a41..0000000000 --- a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/token/token.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright © 2020 Chris Mellard chris.mellard@icloud.com - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
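isACRRegistry in the deleted helper.go decides whether a server URL belongs to Azure Container Registry by parsing the host and matching it against the azurecr.* sovereign-cloud suffixes, with mcr.microsoft.com special-cased. A small sketch of that check; note this variant anchors the pattern to the end of the hostname, whereas the upstream regexp is unanchored:

package main

import (
	"fmt"
	"net/url"
	"regexp"
)

// acrHost matches ACR hostnames across the public, China, Germany and US clouds.
var acrHost = regexp.MustCompile(`\.azurecr\.(io|cn|de|us)$`)

func isACRRegistry(input string) bool {
	u, err := url.Parse("https://" + input)
	if err != nil {
		return false
	}
	host := u.Hostname()
	return host == "mcr.microsoft.com" || acrHost.MatchString(host)
}

func main() {
	fmt.Println(isACRRegistry("myregistry.azurecr.io")) // true
	fmt.Println(isACRRegistry("registry.example.com"))  // false
}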
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package token - -import ( - "fmt" - "os" - - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure/auth" -) - -func GetServicePrincipalTokenFromEnvironment() (*adal.ServicePrincipalToken, auth.EnvironmentSettings, error) { - settings, err := auth.GetSettingsFromEnvironment() - if err != nil { - return &adal.ServicePrincipalToken{}, auth.EnvironmentSettings{}, fmt.Errorf("failed to get auth settings from environment - %w", err) - } - - spToken, err := getServicePrincipalToken(settings, settings.Environment.ResourceManagerEndpoint) - if err != nil { - return &adal.ServicePrincipalToken{}, auth.EnvironmentSettings{}, fmt.Errorf("failed to initialise sp token config %w", err) - } - - return spToken, settings, nil -} - -// getServicePrincipalToken retrieves an Azure AD OAuth2 token from the supplied environment settings for the specified resource -func getServicePrincipalToken(settings auth.EnvironmentSettings, resource string) (*adal.ServicePrincipalToken, error) { - - //1.Client Credentials - if _, e := settings.GetClientCredentials(); e == nil { - clientCredentialsConfig, err := settings.GetClientCredentials() - if err != nil { - return &adal.ServicePrincipalToken{}, fmt.Errorf("failed to get client credentials settings from environment - %w", err) - } - oAuthConfig, err := adal.NewOAuthConfig(settings.Environment.ActiveDirectoryEndpoint, clientCredentialsConfig.TenantID) - if err != nil { - return &adal.ServicePrincipalToken{}, fmt.Errorf("failed to initialise OAuthConfig - %w", err) - } - return adal.NewServicePrincipalToken(*oAuthConfig, clientCredentialsConfig.ClientID, clientCredentialsConfig.ClientSecret, clientCredentialsConfig.Resource) - } - - //2. Client Certificate - if _, e := settings.GetClientCertificate(); e == nil { - return &adal.ServicePrincipalToken{}, fmt.Errorf("authentication method currently unsupported") - } - - //3. Username Password - if _, e := settings.GetUsernamePassword(); e == nil { - return &adal.ServicePrincipalToken{}, fmt.Errorf("authentication method currently unsupported") - } - - // federated OIDC JWT assertion - jwt, err := jwtLookup() - if err == nil { - clientID, isPresent := os.LookupEnv("AZURE_CLIENT_ID") - if !isPresent { - return &adal.ServicePrincipalToken{}, fmt.Errorf("failed to get client id from environment") - } - tenantID, isPresent := os.LookupEnv("AZURE_TENANT_ID") - if !isPresent { - return &adal.ServicePrincipalToken{}, fmt.Errorf("failed to get client id from environment") - } - - oAuthConfig, err := adal.NewOAuthConfig(settings.Environment.ActiveDirectoryEndpoint, tenantID) - if err != nil { - return &adal.ServicePrincipalToken{}, fmt.Errorf("failed to initialise OAuthConfig - %w", err) - } - - return adal.NewServicePrincipalTokenFromFederatedToken(*oAuthConfig, clientID, *jwt, resource) - } - - // 4. 
MSI - return adal.NewServicePrincipalTokenFromManagedIdentity(resource, &adal.ManagedIdentityOptions{ - ClientID: os.Getenv("AZURE_CLIENT_ID"), - }) -} - -func jwtLookup() (*string, error) { - jwt, isPresent := os.LookupEnv("AZURE_FEDERATED_TOKEN") - if isPresent { - return &jwt, nil - } - - if jwtFile, isPresent := os.LookupEnv("AZURE_FEDERATED_TOKEN_FILE"); isPresent { - jwtBytes, err := os.ReadFile(jwtFile) - if err != nil { - return nil, err - } - jwt = string(jwtBytes) - return &jwt, nil - } - - return nil, fmt.Errorf("no JWT found") -} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
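getServicePrincipalToken above falls through a fixed priority list: client credentials, then client certificate, then username/password (the middle two rejected as unsupported), then a federated OIDC assertion, and finally managed identity. The federated branch reads the JWT either inline from AZURE_FEDERATED_TOKEN or from the file named by AZURE_FEDERATED_TOKEN_FILE; a minimal sketch of that lookup order (the TrimSpace is an addition here, since projected token files often end in a newline):

package main

import (
	"errors"
	"fmt"
	"os"
	"strings"
)

// lookupFederatedJWT mirrors jwtLookup: prefer the inline env var,
// fall back to a token file, and report failure otherwise.
func lookupFederatedJWT() (string, error) {
	if jwt, ok := os.LookupEnv("AZURE_FEDERATED_TOKEN"); ok {
		return jwt, nil
	}
	if path, ok := os.LookupEnv("AZURE_FEDERATED_TOKEN_FILE"); ok {
		b, err := os.ReadFile(path)
		if err != nil {
			return "", err
		}
		return strings.TrimSpace(string(b)), nil
	}
	return "", errors.New("no federated JWT found")
}

func main() {
	jwt, err := lookupFederatedJWT()
	fmt.Println(jwt, err)
}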
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go deleted file mode 100644 index b071cea51d..0000000000 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ /dev/null @@ -1,690 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - Copyright 2019 The Go Authors. All rights reserved. - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. -*/ - -package estargz - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "context" - "errors" - "fmt" - "io" - "os" - "path" - "runtime" - "strings" - "sync" - - "github.com/containerd/stargz-snapshotter/estargz/errorutil" - "github.com/klauspost/compress/zstd" - digest "github.com/opencontainers/go-digest" - "golang.org/x/sync/errgroup" -) - -type options struct { - chunkSize int - compressionLevel int - prioritizedFiles []string - missedPrioritizedFiles *[]string - compression Compression - ctx context.Context - minChunkSize int -} - -type Option func(o *options) error - -// WithChunkSize option specifies the chunk size of eStargz blob to build. -func WithChunkSize(chunkSize int) Option { - return func(o *options) error { - o.chunkSize = chunkSize - return nil - } -} - -// WithCompressionLevel option specifies the gzip compression level. -// The default is gzip.BestCompression. -// This option will be ignored if WithCompression option is used. -// See also: https://godoc.org/compress/gzip#pkg-constants -func WithCompressionLevel(level int) Option { - return func(o *options) error { - o.compressionLevel = level - return nil - } -} - -// WithPrioritizedFiles option specifies the list of prioritized files. 
-// These files must be complete paths that are absolute or relative to "/" -// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar" -// are treated as "/foo/bar". -func WithPrioritizedFiles(files []string) Option { - return func(o *options) error { - o.prioritizedFiles = files - return nil - } -} - -// WithAllowPrioritizeNotFound makes Build continue the execution even if some -// of prioritized files specified by WithPrioritizedFiles option aren't found -// in the input tar. Instead, this records all missed file names to the passed -// slice. -func WithAllowPrioritizeNotFound(missedFiles *[]string) Option { - return func(o *options) error { - if missedFiles == nil { - return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed") - } - o.missedPrioritizedFiles = missedFiles - return nil - } -} - -// WithCompression specifies compression algorithm to be used. -// Default is gzip. -func WithCompression(compression Compression) Option { - return func(o *options) error { - o.compression = compression - return nil - } -} - -// WithContext specifies a context that can be used for clean canceleration. -func WithContext(ctx context.Context) Option { - return func(o *options) error { - o.ctx = ctx - return nil - } -} - -// WithMinChunkSize option specifies the minimal number of bytes of data -// must be written in one gzip stream. -// By increasing this number, one gzip stream can contain multiple files -// and it hopefully leads to smaller result blob. -// NOTE: This adds a TOC property that old reader doesn't understand. -func WithMinChunkSize(minChunkSize int) Option { - return func(o *options) error { - o.minChunkSize = minChunkSize - return nil - } -} - -// Blob is an eStargz blob. -type Blob struct { - io.ReadCloser - diffID digest.Digester - tocDigest digest.Digest -} - -// DiffID returns the digest of uncompressed blob. -// It is only valid to call DiffID after Close. -func (b *Blob) DiffID() digest.Digest { - return b.diffID.Digest() -} - -// TOCDigest returns the digest of uncompressed TOC JSON. -func (b *Blob) TOCDigest() digest.Digest { - return b.tocDigest -} - -// Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd -// or plain tar) passed through the argument. If there are some prioritized files are listed in -// the option, these files are grouped as "prioritized" and can be used for runtime optimization -// (e.g. prefetch). This function builds a blob in parallel, with dividing that blob into several -// (at least the number of runtime.GOMAXPROCS(0)) sub-blobs. 
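Blob.DiffID above exposes the digest of the uncompressed layer, which the Build implementation that follows computes by teeing its compressed output back through a decompressor into a digest.Digester. Hashing a stream incrementally with go-digest looks roughly like this (using the upstream module path github.com/opencontainers/go-digest rather than the vendored mirror):

package main

import (
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Feed a stream into a digester, as Build does for the layer diff ID.
	d := digest.Canonical.Digester()
	if _, err := io.Copy(d.Hash(), strings.NewReader("uncompressed layer bytes")); err != nil {
		panic(err)
	}
	fmt.Println(d.Digest()) // sha256:...
}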
-func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { - var opts options - opts.compressionLevel = gzip.BestCompression // BestCompression by default - for _, o := range opt { - if err := o(&opts); err != nil { - return nil, err - } - } - if opts.compression == nil { - opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) - } - layerFiles := newTempFiles() - ctx := opts.ctx - if ctx == nil { - ctx = context.Background() - } - done := make(chan struct{}) - defer close(done) - go func() { - select { - case <-done: - // nop - case <-ctx.Done(): - layerFiles.CleanupAll() - } - }() - defer func() { - if rErr != nil { - if err := layerFiles.CleanupAll(); err != nil { - rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) - } - } - if cErr := ctx.Err(); cErr != nil { - rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) - } - }() - tarBlob, err := decompressBlob(tarBlob, layerFiles) - if err != nil { - return nil, err - } - entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles) - if err != nil { - return nil, err - } - var tarParts [][]*entry - if opts.minChunkSize > 0 { - // Each entry needs to know the size of the current gzip stream so they - // cannot be processed in parallel. - tarParts = [][]*entry{entries} - } else { - tarParts = divideEntries(entries, runtime.GOMAXPROCS(0)) - } - writers := make([]*Writer, len(tarParts)) - payloads := make([]*os.File, len(tarParts)) - var mu sync.Mutex - var eg errgroup.Group - for i, parts := range tarParts { - i, parts := i, parts - // builds verifiable stargz sub-blobs - eg.Go(func() error { - esgzFile, err := layerFiles.TempFile("", "esgzdata") - if err != nil { - return err - } - sw := NewWriterWithCompressor(esgzFile, opts.compression) - sw.ChunkSize = opts.chunkSize - sw.MinChunkSize = opts.minChunkSize - if sw.needsOpenGzEntries == nil { - sw.needsOpenGzEntries = make(map[string]struct{}) - } - for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} { - sw.needsOpenGzEntries[f] = struct{}{} - } - if err := sw.AppendTar(readerFromEntries(parts...)); err != nil { - return err - } - mu.Lock() - writers[i] = sw - payloads[i] = esgzFile - mu.Unlock() - return nil - }) - } - if err := eg.Wait(); err != nil { - rErr = err - return nil, err - } - tocAndFooter, tocDgst, err := closeWithCombine(writers...) - if err != nil { - rErr = err - return nil, err - } - var rs []io.Reader - for _, p := range payloads { - fs, err := fileSectionReader(p) - if err != nil { - return nil, err - } - rs = append(rs, fs) - } - diffID := digest.Canonical.Digester() - pr, pw := io.Pipe() - go func() { - r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw)) - if err != nil { - pw.CloseWithError(err) - return - } - defer r.Close() - if _, err := io.Copy(diffID.Hash(), r); err != nil { - pw.CloseWithError(err) - return - } - pw.Close() - }() - return &Blob{ - ReadCloser: readCloser{ - Reader: pr, - closeFunc: layerFiles.CleanupAll, - }, - tocDigest: tocDgst, - diffID: diffID, - }, nil -} - -// closeWithCombine takes unclosed Writers and close them. This also returns the -// toc that combined all Writers into. -// Writers doesn't write TOC and footer to the underlying writers so they can be -// combined into a single eStargz and tocAndFooter returned by this function can -// be appended at the tail of that combined blob. 
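Build above fans the sorted tar entries out across roughly GOMAXPROCS sub-blobs and compresses them concurrently with an errgroup, collecting each finished writer back into an indexed slice before closeWithCombine (implemented just below) stitches them together. A stripped-down sketch of that fan-out/collect shape, with a toy payload standing in for the tar parts:

package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sync/errgroup"
)

func main() {
	parts := []string{"bin/", "etc/", "usr/", "var/"}
	// Each goroutine writes to its own index, so no mutex is strictly
	// required here; the upstream code guards its slices with one anyway.
	results := make([]string, len(parts))

	var eg errgroup.Group
	eg.SetLimit(runtime.GOMAXPROCS(0)) // bound concurrency like Build's part count
	for i, p := range parts {
		i, p := i, p // capture loop variables (pre-Go 1.22 semantics)
		eg.Go(func() error {
			results[i] = "compressed:" + p // stand-in for building one sub-blob
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("build failed:", err)
		return
	}
	fmt.Println(results)
}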
-func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) { - if len(ws) == 0 { - return nil, "", fmt.Errorf("at least one writer must be passed") - } - for _, w := range ws { - if w.closed { - return nil, "", fmt.Errorf("writer must be unclosed") - } - defer func(w *Writer) { w.closed = true }(w) - if err := w.closeGz(); err != nil { - return nil, "", err - } - if err := w.bw.Flush(); err != nil { - return nil, "", err - } - } - var ( - mtoc = new(JTOC) - currentOffset int64 - ) - mtoc.Version = ws[0].toc.Version - for _, w := range ws { - for _, e := range w.toc.Entries { - // Recalculate Offset of non-empty files/chunks - if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" { - e.Offset += currentOffset - } - mtoc.Entries = append(mtoc.Entries, e) - } - if w.toc.Version > mtoc.Version { - mtoc.Version = w.toc.Version - } - currentOffset += w.cw.n - } - - return tocAndFooter(ws[0].compressor, mtoc, currentOffset) -} - -func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) { - buf := new(bytes.Buffer) - tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil) - if err != nil { - return nil, "", err - } - return buf, tocDigest, nil -} - -// divideEntries divides passed entries to the parts at least the number specified by the -// argument. -func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) { - var estimatedSize int64 - for _, e := range entries { - estimatedSize += e.header.Size - } - unitSize := estimatedSize / int64(minPartsNum) - var ( - nextEnd = unitSize - offset int64 - ) - set = append(set, []*entry{}) - for _, e := range entries { - set[len(set)-1] = append(set[len(set)-1], e) - offset += e.header.Size - if offset > nextEnd { - set = append(set, []*entry{}) - nextEnd += unitSize - } - } - return -} - -var errNotFound = errors.New("not found") - -// sortEntries reads the specified tar blob and returns a list of tar entries. -// If some of prioritized files are specified, the list starts from these -// files with keeping the order specified by the argument. -func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) { - - // Import tar file. - intar, err := importTar(in) - if err != nil { - return nil, fmt.Errorf("failed to sort: %w", err) - } - - // Sort the tar file respecting to the prioritized files list. - sorted := &tarFile{} - for _, l := range prioritized { - if err := moveRec(l, intar, sorted); err != nil { - if errors.Is(err, errNotFound) && missedPrioritized != nil { - *missedPrioritized = append(*missedPrioritized, l) - continue // allow not found - } - return nil, fmt.Errorf("failed to sort tar entries: %w", err) - } - } - if len(prioritized) == 0 { - sorted.add(&entry{ - header: &tar.Header{ - Name: NoPrefetchLandmark, - Typeflag: tar.TypeReg, - Size: int64(len([]byte{landmarkContents})), - }, - payload: bytes.NewReader([]byte{landmarkContents}), - }) - } else { - sorted.add(&entry{ - header: &tar.Header{ - Name: PrefetchLandmark, - Typeflag: tar.TypeReg, - Size: int64(len([]byte{landmarkContents})), - }, - payload: bytes.NewReader([]byte{landmarkContents}), - }) - } - - // Dump all entry and concatinate them. - return append(sorted.dump(), intar.dump()...), nil -} - -// readerFromEntries returns a reader of tar archive that contains entries passed -// through the arguments. 
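sortEntries above pulls each prioritized file to the front via moveRec, which appears a little further below: an entry is moved into the sorted output only after its parent directories (and any hardlink target) have been moved, so the reordered tar stays extractable front to back. The parent-first recursion over a cleaned path looks like this in isolation (illustrative, not the upstream helper):

package main

import (
	"fmt"
	"path"
	"strings"
)

// clean mimics cleanEntryName: "./x", "/x" and "x" all name the same entry.
func clean(name string) string {
	return strings.TrimPrefix(path.Clean("/"+name), "/")
}

// visit appends name after all of its parents, root first,
// the same order in which moveRec emits prioritized entries.
func visit(name string, out *[]string) {
	name = clean(name)
	if name == "" {
		return
	}
	parent, _ := path.Split(name)
	visit(parent, out)
	*out = append(*out, name)
}

func main() {
	var order []string
	visit("./usr/share/doc/README", &order)
	fmt.Println(order) // [usr usr/share usr/share/doc usr/share/doc/README]
}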
-func readerFromEntries(entries ...*entry) io.Reader { - pr, pw := io.Pipe() - go func() { - tw := tar.NewWriter(pw) - defer tw.Close() - for _, entry := range entries { - if err := tw.WriteHeader(entry.header); err != nil { - pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err)) - return - } - if _, err := io.Copy(tw, entry.payload); err != nil { - pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err)) - return - } - } - pw.Close() - }() - return pr -} - -func importTar(in io.ReaderAt) (*tarFile, error) { - tf := &tarFile{} - pw, err := newCountReadSeeker(in) - if err != nil { - return nil, fmt.Errorf("failed to make position watcher: %w", err) - } - tr := tar.NewReader(pw) - - // Walk through all nodes. - for { - // Fetch and parse next header. - h, err := tr.Next() - if err != nil { - if err == io.EOF { - break - } else { - return nil, fmt.Errorf("failed to parse tar file, %w", err) - } - } - switch cleanEntryName(h.Name) { - case PrefetchLandmark, NoPrefetchLandmark: - // Ignore existing landmark - continue - } - - // Add entry. If it already exists, replace it. - if _, ok := tf.get(h.Name); ok { - tf.remove(h.Name) - } - tf.add(&entry{ - header: h, - payload: io.NewSectionReader(in, pw.currentPos(), h.Size), - }) - } - - return tf, nil -} - -func moveRec(name string, in *tarFile, out *tarFile) error { - name = cleanEntryName(name) - if name == "" { // root directory. stop recursion. - if e, ok := in.get(name); ok { - // entry of the root directory exists. we should move it as well. - // this case will occur if tar entries are prefixed with "./", "/", etc. - out.add(e) - in.remove(name) - } - return nil - } - - _, okIn := in.get(name) - _, okOut := out.get(name) - if !okIn && !okOut { - return fmt.Errorf("file: %q: %w", name, errNotFound) - } - - parent, _ := path.Split(strings.TrimSuffix(name, "/")) - if err := moveRec(parent, in, out); err != nil { - return err - } - if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { - if err := moveRec(e.header.Linkname, in, out); err != nil { - return err - } - } - if e, ok := in.get(name); ok { - out.add(e) - in.remove(name) - } - return nil -} - -type entry struct { - header *tar.Header - payload io.ReadSeeker -} - -type tarFile struct { - index map[string]*entry - stream []*entry -} - -func (f *tarFile) add(e *entry) { - if f.index == nil { - f.index = make(map[string]*entry) - } - f.index[cleanEntryName(e.header.Name)] = e - f.stream = append(f.stream, e) -} - -func (f *tarFile) remove(name string) { - name = cleanEntryName(name) - if f.index != nil { - delete(f.index, name) - } - var filtered []*entry - for _, e := range f.stream { - if cleanEntryName(e.header.Name) == name { - continue - } - filtered = append(filtered, e) - } - f.stream = filtered -} - -func (f *tarFile) get(name string) (e *entry, ok bool) { - if f.index == nil { - return nil, false - } - e, ok = f.index[cleanEntryName(name)] - return -} - -func (f *tarFile) dump() []*entry { - return f.stream -} - -type readCloser struct { - io.Reader - closeFunc func() error -} - -func (rc readCloser) Close() error { - return rc.closeFunc() -} - -func fileSectionReader(file *os.File) (*io.SectionReader, error) { - info, err := file.Stat() - if err != nil { - return nil, err - } - return io.NewSectionReader(file, 0, info.Size()), nil -} - -func newTempFiles() *tempFiles { - return &tempFiles{} -} - -type tempFiles struct { - files []*os.File - filesMu sync.Mutex - cleanupOnce sync.Once -} - -func (tf *tempFiles) TempFile(dir, pattern string) 
(*os.File, error) { - f, err := os.CreateTemp(dir, pattern) - if err != nil { - return nil, err - } - tf.filesMu.Lock() - tf.files = append(tf.files, f) - tf.filesMu.Unlock() - return f, nil -} - -func (tf *tempFiles) CleanupAll() (err error) { - tf.cleanupOnce.Do(func() { - err = tf.cleanupAll() - }) - return -} - -func (tf *tempFiles) cleanupAll() error { - tf.filesMu.Lock() - defer tf.filesMu.Unlock() - var allErr []error - for _, f := range tf.files { - if err := f.Close(); err != nil { - allErr = append(allErr, err) - } - if err := os.Remove(f.Name()); err != nil { - allErr = append(allErr, err) - } - } - tf.files = nil - return errorutil.Aggregate(allErr) -} - -func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) { - pos := int64(0) - return &countReadSeeker{r: r, cPos: &pos}, nil -} - -type countReadSeeker struct { - r io.ReaderAt - cPos *int64 - - mu sync.Mutex -} - -func (cr *countReadSeeker) Read(p []byte) (int, error) { - cr.mu.Lock() - defer cr.mu.Unlock() - - n, err := cr.r.ReadAt(p, *cr.cPos) - if err == nil { - *cr.cPos += int64(n) - } - return n, err -} - -func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) { - cr.mu.Lock() - defer cr.mu.Unlock() - - switch whence { - default: - return 0, fmt.Errorf("Unknown whence: %v", whence) - case io.SeekStart: - case io.SeekCurrent: - offset += *cr.cPos - case io.SeekEnd: - return 0, fmt.Errorf("Unsupported whence: %v", whence) - } - - if offset < 0 { - return 0, fmt.Errorf("invalid offset") - } - *cr.cPos = offset - return offset, nil -} - -func (cr *countReadSeeker) currentPos() int64 { - cr.mu.Lock() - defer cr.mu.Unlock() - - return *cr.cPos -} - -func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { - if org.Size() < 4 { - return org, nil - } - src := make([]byte, 4) - if _, err := org.Read(src); err != nil && err != io.EOF { - return nil, err - } - var dR io.Reader - if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { - // gzip - dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) - if err != nil { - return nil, err - } - defer dgR.Close() - dR = io.Reader(dgR) - } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) { - // zstd - dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size())) - if err != nil { - return nil, err - } - defer dzR.Close() - dR = io.Reader(dzR) - } else { - // uncompressed - return io.NewSectionReader(org, 0, org.Size()), nil - } - b, err := tmp.TempFile("", "uncompresseddata") - if err != nil { - return nil, err - } - if _, err := io.Copy(b, dR); err != nil { - return nil, err - } - return fileSectionReader(b) -} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go deleted file mode 100644 index 6de78b02dc..0000000000 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package errorutil - -import ( - "errors" - "fmt" - "strings" -) - -// Aggregate combines a list of errors into a single new error. -func Aggregate(errs []error) error { - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - points := make([]string, len(errs)+1) - points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs)) - for i, err := range errs { - points[i+1] = fmt.Sprintf("* %s", err) - } - return errors.New(strings.Join(points, "\n\t")) - } -} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go deleted file mode 100644 index f4d5546558..0000000000 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ /dev/null @@ -1,1223 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - Copyright 2019 The Go Authors. All rights reserved. - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. -*/ - -package estargz - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/sha256" - "errors" - "fmt" - "hash" - "io" - "os" - "path" - "sort" - "strings" - "sync" - "time" - - "github.com/containerd/stargz-snapshotter/estargz/errorutil" - digest "github.com/opencontainers/go-digest" - "github.com/vbatts/tar-split/archive/tar" -) - -// A Reader permits random access reads from a stargz file. -type Reader struct { - sr *io.SectionReader - toc *JTOC - tocDigest digest.Digest - - // m stores all non-chunk entries, keyed by name. - m map[string]*TOCEntry - - // chunks stores all TOCEntry values for regular files that - // are split up. For a file with a single chunk, it's only - // stored in m. - chunks map[string][]*TOCEntry - - decompressor Decompressor -} - -type openOpts struct { - tocOffset int64 - decompressors []Decompressor - telemetry *Telemetry -} - -// OpenOption is an option used during opening the layer -type OpenOption func(o *openOpts) error - -// WithTOCOffset option specifies the offset of TOC -func WithTOCOffset(tocOffset int64) OpenOption { - return func(o *openOpts) error { - o.tocOffset = tocOffset - return nil - } -} - -// WithDecompressors option specifies decompressors to use. -// Default is gzip-based decompressor. -func WithDecompressors(decompressors ...Decompressor) OpenOption { - return func(o *openOpts) error { - o.decompressors = decompressors - return nil - } -} - -// WithTelemetry option specifies the telemetry hooks -func WithTelemetry(telemetry *Telemetry) OpenOption { - return func(o *openOpts) error { - o.telemetry = telemetry - return nil - } -} - -// MeasureLatencyHook is a func which takes start time and records the diff -type MeasureLatencyHook func(time.Time) - -// Telemetry is a struct which defines telemetry hooks. 
By implementing these hooks you should be able to record -// the latency metrics of the respective steps of an estargz Open operation. To be used with the WithTelemetry option of estargz.Open(...) -type Telemetry struct { - GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds) - GetTocLatency MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds) - DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds) -} - -// Open opens a stargz file for reading. -// The behavior is configurable using options. -// -// Note that each entry name is normalized as the path that is relative to root. -func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { - var opts openOpts - for _, o := range opt { - if err := o(&opts); err != nil { - return nil, err - } - } - - gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} - decompressors := append(gzipCompressors, opts.decompressors...) - - // Determine the size to fetch. Try to fetch as many bytes as possible. - fetchSize := maxFooterSize(sr.Size(), decompressors...) - if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize { - if maybeTocOffset > sr.Size() { - return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size()) - } - fetchSize = sr.Size() - maybeTocOffset - } - - start := time.Now() // before getting layer footer - footer := make([]byte, fetchSize) - if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil { - return nil, fmt.Errorf("error reading footer: %v", err) - } - if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil { - opts.telemetry.GetFooterLatency(start) - } - - var allErr []error - var found bool - var r *Reader - for _, d := range decompressors { - fSize := d.FooterSize() - fOffset := positive(int64(len(footer)) - fSize) - maybeTocBytes := footer[:fOffset] - _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:]) - if err != nil { - allErr = append(allErr, err) - continue - } - if tocOffset >= 0 && tocSize <= 0 { - tocSize = sr.Size() - tocOffset - fSize - } - if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) { - maybeTocBytes = maybeTocBytes[:tocSize] - } - r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) - if err == nil { - found = true - break - } - allErr = append(allErr, err) - } - if !found { - return nil, errorutil.Aggregate(allErr) - } - if err := r.initFields(); err != nil { - return nil, fmt.Errorf("failed to initialize fields of entries: %v", err) - } - return r, nil -} - -// OpenFooter extracts and parses the footer from the given blob. -// It only supports gzip-based eStargz. -func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) { - if sr.Size() < FooterSize && sr.Size() < legacyFooterSize { - return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size()) - } - var footer [FooterSize]byte - if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil { - return 0, 0, fmt.Errorf("error reading footer: %v", err) - } - var allErr []error - for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} { - fSize := d.FooterSize() - fOffset := positive(int64(len(footer)) - fSize) - _, tocOffset, _, err := d.ParseFooter(footer[fOffset:]) - if err == nil { - return tocOffset, fSize, err - } - allErr = append(allErr, err) - } - return 0, 0, errorutil.Aggregate(allErr) -} - -// initFields populates the Reader from r.toc after decoding it from -// JSON.
-// -// Unexported fields and TOCEntry fields that were -// implicit in the JSON are populated. -func (r *Reader) initFields() error { - r.m = make(map[string]*TOCEntry, len(r.toc.Entries)) - r.chunks = make(map[string][]*TOCEntry) - var lastPath string - uname := map[int]string{} - gname := map[int]string{} - var lastRegEnt *TOCEntry - var chunkTopIndex int - for i, ent := range r.toc.Entries { - ent.Name = cleanEntryName(ent.Name) - switch ent.Type { - case "reg", "chunk": - if ent.Offset != r.toc.Entries[chunkTopIndex].Offset { - chunkTopIndex = i - } - ent.chunkTopIndex = chunkTopIndex - } - if ent.Type == "reg" { - lastRegEnt = ent - } - if ent.Type == "chunk" { - ent.Name = lastPath - r.chunks[ent.Name] = append(r.chunks[ent.Name], ent) - if ent.ChunkSize == 0 && lastRegEnt != nil { - ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset - } - } else { - lastPath = ent.Name - - if ent.Uname != "" { - uname[ent.UID] = ent.Uname - } else { - ent.Uname = uname[ent.UID] - } - if ent.Gname != "" { - gname[ent.GID] = ent.Gname - } else { - ent.Gname = gname[ent.GID] - } - - ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339) - - if ent.Type == "dir" { - ent.NumLink++ // Parent dir links to this directory - } - r.m[ent.Name] = ent - } - if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size { - r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1) - r.chunks[ent.Name] = append(r.chunks[ent.Name], ent) - } - if ent.ChunkSize == 0 && ent.Size != 0 { - ent.ChunkSize = ent.Size - } - } - - // Populate children, add implicit directories: - for _, ent := range r.toc.Entries { - if ent.Type == "chunk" { - continue - } - // add "foo/": - // add "foo" child to "" (creating "" if necessary) - // - // add "foo/bar/": - // add "bar" child to "foo" (creating "foo" if necessary) - // - // add "foo/bar.txt": - // add "bar.txt" child to "foo" (creating "foo" if necessary) - // - // add "a/b/c/d/e/f.txt": - // create "a/b/c/d/e" node - // add "f.txt" child to "e" - - name := ent.Name - pdirName := parentDir(name) - if name == pdirName { - // This entry and its parent are the same. - // Ignore this to avoid an infinite reference loop. - // The example case where this can occur is when tar contains the root - // directory itself (e.g. "./", "/"). - continue - } - pdir := r.getOrCreateDir(pdirName) - ent.NumLink++ // at least one name (ent.Name) references this entry. - if ent.Type == "hardlink" { - org, err := r.getSource(ent) - if err != nil { - return err - } - org.NumLink++ // original entry is referenced by this ent.Name.
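- // Adopt the hardlink's source entry so that this name and the original resolve to a single shared entry.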
- ent = org - } - pdir.addChild(path.Base(name), ent) - } - - lastOffset := r.sr.Size() - for i := len(r.toc.Entries) - 1; i >= 0; i-- { - e := r.toc.Entries[i] - if e.isDataType() { - e.nextOffset = lastOffset - } - if e.Offset != 0 && e.InnerOffset == 0 { - lastOffset = e.Offset - } - } - - return nil -} - -func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) { - if ent.Type == "hardlink" { - org, ok := r.m[cleanEntryName(ent.LinkName)] - if !ok { - return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName) - } - ent, err = r.getSource(org) - if err != nil { - return nil, err - } - } - return ent, nil -} - -func parentDir(p string) string { - dir, _ := path.Split(p) - return strings.TrimSuffix(dir, "/") -} - -func (r *Reader) getOrCreateDir(d string) *TOCEntry { - e, ok := r.m[d] - if !ok { - e = &TOCEntry{ - Name: d, - Type: "dir", - Mode: 0755, - NumLink: 2, // The directory itself(.) and the parent link to this directory. - } - r.m[d] = e - if d != "" { - pdir := r.getOrCreateDir(parentDir(d)) - pdir.addChild(path.Base(d), e) - } - } - return e -} - -func (r *Reader) TOCDigest() digest.Digest { - return r.tocDigest -} - -// VerifyTOC checks that the TOC JSON in the passed blob matches the -// passed digests and that the TOC JSON contains digests for all chunks -// contained in the blob. If the verification succeeds, this function -// returns a TOCEntryVerifier which holds all chunk digests in the stargz blob. -func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) { - // Verify the digest of TOC JSON - if r.tocDigest != tocDigest { - return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest) - } - return r.Verifiers() -} - -// Verifiers returns a TOCEntryVerifier of this blob. Use VerifyTOC instead in most cases -// because this doesn't verify TOC. -func (r *Reader) Verifiers() (TOCEntryVerifier, error) { - chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest - regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest - var chunkDigestMapIncomplete bool - var regDigestMapIncomplete bool - var containsChunk bool - for _, e := range r.toc.Entries { - if e.Type != "reg" && e.Type != "chunk" { - continue - } - - // offset must be unique in stargz blob - _, dOK := chunkDigestMap[e.Offset] - _, rOK := regDigestMap[e.Offset] - if dOK || rOK { - return nil, fmt.Errorf("offset %d found twice", e.Offset) - } - - if e.Type == "reg" { - if e.Size == 0 { - continue // ignores empty file - } - - // record the digest of regular file payload - if e.Digest != "" { - d, err := digest.Parse(e.Digest) - if err != nil { - return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err) - } - regDigestMap[e.Offset] = d - } else { - regDigestMapIncomplete = true - } - } else { - containsChunk = true // this layer contains "chunk" entries. - } - - // "reg" entries can also contain ChunkDigest (e.g. when "reg" is the first entry of - // a chunked file) - if e.ChunkDigest != "" { - d, err := digest.Parse(e.ChunkDigest) - if err != nil { - return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err) - } - chunkDigestMap[e.Offset] = d - } else { - chunkDigestMapIncomplete = true - } - } - - if chunkDigestMapIncomplete { - // Though some chunk digests are not found, if this layer doesn't contain - // "chunk"s and all digests of "reg" files are recorded, we can use them instead.
- if !containsChunk && !regDigestMapIncomplete { - return &verifier{digestMap: regDigestMap}, nil - } - return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON") - } - - return &verifier{digestMap: chunkDigestMap}, nil -} - -// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by -// offset of the chunk. -type verifier struct { - digestMap map[int64]digest.Digest - digestMapMu sync.Mutex -} - -// Verifier returns a content verifier specified by TOCEntry. -func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) { - v.digestMapMu.Lock() - defer v.digestMapMu.Unlock() - d, ok := v.digestMap[ce.Offset] - if !ok { - return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered", - ce.Offset, ce.ChunkSize) - } - return d.Verifier(), nil -} - -// ChunkEntryForOffset returns the TOCEntry containing the byte of the -// named file at the given offset within the file. -// Name must be absolute path or one that is relative to root. -func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) { - name = cleanEntryName(name) - e, ok = r.Lookup(name) - if !ok || !e.isDataType() { - return nil, false - } - ents := r.chunks[name] - if len(ents) < 2 { - if offset >= e.ChunkSize { - return nil, false - } - return e, true - } - i := sort.Search(len(ents), func(i int) bool { - e := ents[i] - return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize) - }) - if i == len(ents) { - return nil, false - } - return ents[i], true -} - -// Lookup returns the Table of Contents entry for the given path. -// -// To get the root directory, use the empty string. -// Path must be absolute path or one that is relative to root. -func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { - path = cleanEntryName(path) - if r == nil { - return - } - e, ok = r.m[path] - if ok && e.Type == "hardlink" { - var err error - e, err = r.getSource(e) - if err != nil { - return nil, false - } - } - return -} - -// OpenFile returns the reader of the specified file payload. -// -// Name must be absolute path or one that is relative to root. -func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { - fr, err := r.newFileReader(name) - if err != nil { - return nil, err - } - return io.NewSectionReader(fr, 0, fr.size), nil -} - -func (r *Reader) newFileReader(name string) (*fileReader, error) { - name = cleanEntryName(name) - ent, ok := r.Lookup(name) - if !ok { - // TODO: come up with some error plan. 
This is lazy: - return nil, &os.PathError{ - Path: name, - Op: "OpenFile", - Err: os.ErrNotExist, - } - } - if ent.Type != "reg" { - return nil, &os.PathError{ - Path: name, - Op: "OpenFile", - Err: errors.New("not a regular file"), - } - } - return &fileReader{ - r: r, - size: ent.Size, - ents: r.getChunks(ent), - }, nil -} - -func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) { - fr, err := r.newFileReader(name) - if err != nil { - return nil, err - } - fr.preRead = preRead - return io.NewSectionReader(fr, 0, fr.size), nil -} - -func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { - if ents, ok := r.chunks[ent.Name]; ok { - return ents - } - return []*TOCEntry{ent} -} - -type fileReader struct { - r *Reader - size int64 - ents []*TOCEntry // 1 or more reg/chunk entries - preRead func(*TOCEntry, io.Reader) error -} - -func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { - if off >= fr.size { - return 0, io.EOF - } - if off < 0 { - return 0, errors.New("invalid offset") - } - var i int - if len(fr.ents) > 1 { - i = sort.Search(len(fr.ents), func(i int) bool { - return fr.ents[i].ChunkOffset >= off - }) - if i == len(fr.ents) { - i = len(fr.ents) - 1 - } - } - ent := fr.ents[i] - if ent.ChunkOffset > off { - if i == 0 { - return 0, errors.New("internal error; first chunk offset is non-zero") - } - ent = fr.ents[i-1] - } - - // If ent is a chunk of a large file, adjust the ReadAt - // offset by the chunk's offset. - off -= ent.ChunkOffset - - finalEnt := fr.ents[len(fr.ents)-1] - compressedOff := ent.Offset - // compressedBytesRemain is the number of compressed bytes in this - // file remaining, over 1+ chunks. - compressedBytesRemain := finalEnt.NextOffset() - compressedOff - - sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain) - - const maxRead = 2 << 20 - var bufSize = maxRead - if compressedBytesRemain < maxRead { - bufSize = int(compressedBytesRemain) - } - - br := bufio.NewReaderSize(sr, bufSize) - if _, err := br.Peek(bufSize); err != nil { - return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err) - } - - dr, err := fr.r.decompressor.Reader(br) - if err != nil { - return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) - } - defer dr.Close() - - if fr.preRead == nil { - if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil { - return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err) - } - return io.ReadFull(dr, p) - } - - var retN int - var retErr error - var found bool - var nr int64 - for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] { - if !e.isDataType() { - continue - } - if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset { - break - } - if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr { - return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err) - } - nr = e.InnerOffset - if e == ent { - found = true - if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { - return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err) - } - retN, retErr = io.ReadFull(dr, p) - nr += off + int64(retN) - continue - } - cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)} - if err := fr.preRead(e, cr); err != nil { - return 0, fmt.Errorf("failed to pre read: %w", err) - } - nr += cr.n - } - if !found { - return 0, fmt.Errorf("fileReader.ReadAt: target entry not found") - } - return 
retN, retErr -} - -// A Writer writes stargz files. -// -// Use NewWriter to create a new Writer. -type Writer struct { - bw *bufio.Writer - cw *countWriter - toc *JTOC - diffHash hash.Hash // SHA-256 of uncompressed tar - - closed bool - gz io.WriteCloser - lastUsername map[int]string - lastGroupname map[int]string - compressor Compressor - - uncompressedCounter *countWriteFlusher - - // ChunkSize optionally controls the maximum number of bytes - // of data of a regular file that can be written in one gzip - // stream before a new gzip stream is started. - // Zero means to use a default, currently 4 MiB. - ChunkSize int - - // MinChunkSize optionally controls the minimum number of bytes - // of data that must be written in one gzip stream before a new gzip - // stream is started. - // NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand. - MinChunkSize int - - needsOpenGzEntries map[string]struct{} -} - -// currentCompressionWriter writes to the current w.gz field, which can -// change throughout writing a tar entry. -// -// Additionally, it updates w's SHA-256 of the uncompressed bytes -// of the tar file. -type currentCompressionWriter struct{ w *Writer } - -func (ccw currentCompressionWriter) Write(p []byte) (int, error) { - ccw.w.diffHash.Write(p) - if ccw.w.gz == nil { - if err := ccw.w.condOpenGz(); err != nil { - return 0, err - } - } - return ccw.w.gz.Write(p) -} - -func (w *Writer) chunkSize() int { - if w.ChunkSize <= 0 { - return 4 << 20 - } - return w.ChunkSize -} - -// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob. -// TOC JSON and footer are removed. -func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { - footerSize := c.FooterSize() - if sr.Size() < footerSize { - return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize) - } - footerOffset := sr.Size() - footerSize - footer := make([]byte, footerSize) - if _, err := sr.ReadAt(footer, footerOffset); err != nil { - return nil, err - } - blobPayloadSize, _, _, err := c.ParseFooter(footer) - if err != nil { - return nil, fmt.Errorf("failed to parse footer: %w", err) - } - if blobPayloadSize < 0 { - blobPayloadSize = sr.Size() - } - return c.Reader(io.LimitReader(sr, blobPayloadSize)) -} - -// NewWriter returns a new stargz writer (gzip-based) writing to w. -// -// The writer must be closed to write its trailing table of contents. -func NewWriter(w io.Writer) *Writer { - return NewWriterLevel(w, gzip.BestCompression) -} - -// NewWriterLevel returns a new stargz writer (gzip-based) writing to w. -// The compression level is configurable. -// -// The writer must be closed to write its trailing table of contents. -func NewWriterLevel(w io.Writer, compressionLevel int) *Writer { - return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel)) -} - -// NewWriterWithCompressor returns a new stargz writer writing to w. -// The compression method is configurable. -// -// The writer must be closed to write its trailing table of contents. -func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer { - bw := bufio.NewWriter(w) - cw := &countWriter{w: bw} - return &Writer{ - bw: bw, - cw: cw, - toc: &JTOC{Version: 1}, - diffHash: sha256.New(), - compressor: c, - uncompressedCounter: &countWriteFlusher{}, - } -} - -// Close writes the stargz's table of contents and flushes all the -// buffers, returning any error.
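As a usage sketch, the typical write path pairs one of these constructors with AppendTar and Close. NewWriter, ChunkSize, AppendTar, and Close are the APIs defined in this file; the import paths and the 1 MiB chunk size below are assumptions for illustration only:

```go
package main

import (
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
	digest "github.com/opencontainers/go-digest"
)

// buildEStargz converts a plain tar stream into an eStargz blob written to
// out, returning the digest of the TOC JSON reported by Close.
func buildEStargz(out io.Writer, tarR io.Reader) (digest.Digest, error) {
	w := estargz.NewWriter(out)
	w.ChunkSize = 1 << 20 // arbitrary: split large regular files into 1 MiB chunks
	if err := w.AppendTar(tarR); err != nil {
		return "", err
	}
	// Close appends the TOC and footer and flushes the buffered writer.
	return w.Close()
}
```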
-func (w *Writer) Close() (digest.Digest, error) { - if w.closed { - return "", nil - } - defer func() { w.closed = true }() - - if err := w.closeGz(); err != nil { - return "", err - } - - // Write the TOC index and footer. - tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash) - if err != nil { - return "", err - } - if err := w.bw.Flush(); err != nil { - return "", err - } - - return tocDigest, nil -} - -func (w *Writer) closeGz() error { - if w.closed { - return errors.New("write on closed Writer") - } - if w.gz != nil { - if err := w.gz.Close(); err != nil { - return err - } - w.gz = nil - } - return nil -} - -func (w *Writer) flushGz() error { - if w.closed { - return errors.New("flush on closed Writer") - } - if w.gz != nil { - if f, ok := w.gz.(interface { - Flush() error - }); ok { - return f.Flush() - } - } - return nil -} - -// nameIfChanged returns name, unless it was already the value of (*mp)[id], -// in which case it returns the empty string. -func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string { - if name == "" { - return "" - } - if *mp == nil { - *mp = make(map[int]string) - } - if (*mp)[id] == name { - return "" - } - (*mp)[id] = name - return name -} - -func (w *Writer) condOpenGz() (err error) { - if w.gz == nil { - w.gz, err = w.compressor.Writer(w.cw) - if w.gz != nil { - w.gz = w.uncompressedCounter.register(w.gz) - } - } - return -} - -// AppendTar reads the tar or tar.gz file from r and appends -// each of its contents to w. -// -// The input r can optionally be gzip compressed but the output will -// always be compressed by the specified compressor. -func (w *Writer) AppendTar(r io.Reader) error { - return w.appendTar(r, false) -} - -// AppendTarLossLess reads the tar or tar.gz file from r and appends -// each of its contents to w. -// -// The input r can optionally be gzip compressed but the output will -// always be compressed by the specified compressor. -// -// The difference from AppendTar is that this writes -// the input tar stream into w without any modification (e.g. to header bytes). -// -// Note that if the input tar stream already contains TOC JSON, this returns an -// error because w cannot overwrite the TOC JSON with the one generated by w without -// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz, -// you should decompress it and remove the TOC JSON in advance. -func (w *Writer) AppendTarLossLess(r io.Reader) error { - return w.appendTar(r, true) -} - -func (w *Writer) appendTar(r io.Reader, lossless bool) error { - var src io.Reader - br := bufio.NewReader(r) - if isGzip(br) { - zr, _ := gzip.NewReader(br) - src = zr - } else { - src = io.Reader(br) - } - dst := currentCompressionWriter{w} - var tw *tar.Writer - if !lossless { - tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode. - } - tr := tar.NewReader(src) - if lossless { - tr.RawAccounting = true - } - prevOffset := w.cw.n - var prevOffsetUncompressed int64 - for { - h, err := tr.Next() - if err == io.EOF { - if lossless { - if remain := tr.RawBytes(); len(remain) > 0 { - // Collect the remaining null bytes.
- // https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53 - if _, err := dst.Write(remain); err != nil { - return err - } - } - } - break - } - if err != nil { - return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err) - } - if cleanEntryName(h.Name) == TOCTarName { - // It is possible for a layer to be "stargzified" twice during the - // distribution lifecycle. So we reserve "TOCTarName" here to avoid - // duplicated entries in the resulting layer. - if lossless { - // We cannot handle this in lossless way. - return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append") - } - continue - } - - xattrs := make(map[string][]byte) - const xattrPAXRecordsPrefix = "SCHILY.xattr." - if h.PAXRecords != nil { - for k, v := range h.PAXRecords { - if strings.HasPrefix(k, xattrPAXRecordsPrefix) { - xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v) - } - } - } - ent := &TOCEntry{ - Name: h.Name, - Mode: h.Mode, - UID: h.Uid, - GID: h.Gid, - Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname), - Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname), - ModTime3339: formatModtime(h.ModTime), - Xattrs: xattrs, - } - if err := w.condOpenGz(); err != nil { - return err - } - if tw != nil { - if err := tw.WriteHeader(h); err != nil { - return err - } - } else { - if _, err := dst.Write(tr.RawBytes()); err != nil { - return err - } - } - switch h.Typeflag { - case tar.TypeLink: - ent.Type = "hardlink" - ent.LinkName = h.Linkname - case tar.TypeSymlink: - ent.Type = "symlink" - ent.LinkName = h.Linkname - case tar.TypeDir: - ent.Type = "dir" - case tar.TypeReg: - ent.Type = "reg" - ent.Size = h.Size - case tar.TypeChar: - ent.Type = "char" - ent.DevMajor = int(h.Devmajor) - ent.DevMinor = int(h.Devminor) - case tar.TypeBlock: - ent.Type = "block" - ent.DevMajor = int(h.Devmajor) - ent.DevMinor = int(h.Devminor) - case tar.TypeFifo: - ent.Type = "fifo" - default: - return fmt.Errorf("unsupported input tar entry %q", h.Typeflag) - } - - // We need to keep a reference to the TOC entry for regular files, so that we - // can fill the digest later. - var regFileEntry *TOCEntry - var payloadDigest digest.Digester - if h.Typeflag == tar.TypeReg { - regFileEntry = ent - payloadDigest = digest.Canonical.Digester() - } - - if h.Typeflag == tar.TypeReg && ent.Size > 0 { - var written int64 - totalSize := ent.Size // save it before we destroy ent - tee := io.TeeReader(tr, payloadDigest.Hash()) - for written < totalSize { - chunkSize := int64(w.chunkSize()) - remain := totalSize - written - if remain < chunkSize { - chunkSize = remain - } else { - ent.ChunkSize = chunkSize - } - - // We flush the underlying compression writer here to correctly calculate "w.cw.n". 
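- // With w.cw.n accurate after the flush below, this chunk either opens a new compression stream at a fresh offset or stays in the current one and records only an InnerOffset.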
- if err := w.flushGz(); err != nil { - return err - } - if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) { - if err := w.closeGz(); err != nil { - return err - } - ent.Offset = w.cw.n - prevOffset = ent.Offset - prevOffsetUncompressed = w.uncompressedCounter.n - } else { - ent.Offset = prevOffset - ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed - } - - ent.ChunkOffset = written - chunkDigest := digest.Canonical.Digester() - - if err := w.condOpenGz(); err != nil { - return err - } - - teeChunk := io.TeeReader(tee, chunkDigest.Hash()) - var out io.Writer - if tw != nil { - out = tw - } else { - out = dst - } - if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil { - return fmt.Errorf("error copying %q: %v", h.Name, err) - } - ent.ChunkDigest = chunkDigest.Digest().String() - w.toc.Entries = append(w.toc.Entries, ent) - written += chunkSize - ent = &TOCEntry{ - Name: h.Name, - Type: "chunk", - } - } - } else { - w.toc.Entries = append(w.toc.Entries, ent) - } - if payloadDigest != nil { - regFileEntry.Digest = payloadDigest.Digest().String() - } - if tw != nil { - if err := tw.Flush(); err != nil { - return err - } - } - } - remainDest := io.Discard - if lossless { - remainDest = dst // Preserve the remaining bytes in lossless mode - } - _, err := io.Copy(remainDest, src) - return err -} - -func (w *Writer) needsOpenGz(ent *TOCEntry) bool { - if ent.Type != "reg" { - return false - } - if w.needsOpenGzEntries == nil { - return false - } - _, ok := w.needsOpenGzEntries[ent.Name] - return ok -} - -// DiffID returns the SHA-256 of the uncompressed tar bytes. -// It is only valid to call DiffID after Close. -func (w *Writer) DiffID() string { - return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil)) -} - -func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) { - for _, d := range decompressors { - if s := d.FooterSize(); res < s && s <= blobSize { - res = s - } - } - return -} - -func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) { - if tocOff < 0 { - // This means that TOC isn't contained in the blob. - // We pass nil reader to ParseTOC and expect that ParseTOC acquire TOC from - // the external location. 
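- // Both telemetry hooks below share one start time because ParseTOC fetches and deserializes the external TOC in a single step here.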
- start := time.Now() - toc, tocDgst, err := d.ParseTOC(nil) - if err != nil { - return nil, err - } - if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { - opts.telemetry.GetTocLatency(start) - } - if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { - opts.telemetry.DeserializeTocLatency(start) - } - return &Reader{ - sr: sr, - toc: toc, - tocDigest: tocDgst, - decompressor: d, - }, nil - } - if len(tocBytes) > 0 { - start := time.Now() - toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) - if err == nil { - if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { - opts.telemetry.DeserializeTocLatency(start) - } - return &Reader{ - sr: sr, - toc: toc, - tocDigest: tocDgst, - decompressor: d, - }, nil - } - } - - start := time.Now() - tocBytes = make([]byte, tocSize) - if _, err := sr.ReadAt(tocBytes, tocOff); err != nil { - return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err) - } - if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { - opts.telemetry.GetTocLatency(start) - } - start = time.Now() - toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) - if err != nil { - return nil, err - } - if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { - opts.telemetry.DeserializeTocLatency(start) - } - return &Reader{ - sr: sr, - toc: toc, - tocDigest: tocDgst, - decompressor: d, - }, nil -} - -func formatModtime(t time.Time) string { - if t.IsZero() || t.Unix() == 0 { - return "" - } - return t.UTC().Round(time.Second).Format(time.RFC3339) -} - -func cleanEntryName(name string) string { - // Use path.Clean to consistently deal with path separators across platforms. - return strings.TrimPrefix(path.Clean("/"+name), "/") -} - -// countWriter counts how many bytes have been written to its wrapped -// io.Writer. -type countWriter struct { - w io.Writer - n int64 -} - -func (cw *countWriter) Write(p []byte) (n int, err error) { - n, err = cw.w.Write(p) - cw.n += int64(n) - return -} - -type countWriteFlusher struct { - io.WriteCloser - n int64 -} - -func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser { - wc.WriteCloser = w - return wc -} - -func (wc *countWriteFlusher) Write(p []byte) (n int, err error) { - n, err = wc.WriteCloser.Write(p) - wc.n += int64(n) - return -} - -func (wc *countWriteFlusher) Flush() error { - if f, ok := wc.WriteCloser.(interface { - Flush() error - }); ok { - return f.Flush() - } - return nil -} - -func (wc *countWriteFlusher) Close() error { - err := wc.WriteCloser.Close() - wc.WriteCloser = nil - return err -} - -// isGzip reports whether br is positioned right before an upcoming gzip stream. -// It does not consume any bytes from br. 
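Peeking rather than reading is what lets appendTar hand the same bufio.Reader to gzip.NewReader afterwards. A standalone sketch of the same magic-byte probe (illustrative, not part of this package):

```go
package main

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"fmt"
)

// sniffGzip reports whether br starts with the gzip magic bytes
// (ID1=0x1f, ID2=0x8b, CM=8 for deflate) without consuming them.
func sniffGzip(br *bufio.Reader) bool {
	peek, _ := br.Peek(3) // Peek leaves the bytes in the buffer
	return len(peek) == 3 && peek[0] == 0x1f && peek[1] == 0x8b && peek[2] == 8
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello"))
	zw.Close()

	br := bufio.NewReader(&buf)
	fmt.Println(sniffGzip(br)) // true; br is still positioned at byte 0
}
```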
-func isGzip(br *bufio.Reader) bool { - const ( - gzipID1 = 0x1f - gzipID2 = 0x8b - gzipDeflate = 8 - ) - peek, _ := br.Peek(3) - return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate -} - -func positive(n int64) int64 { - if n < 0 { - return 0 - } - return n -} - -type countReader struct { - r io.Reader - n int64 -} - -func (cr *countReader) Read(p []byte) (n int, err error) { - n, err = cr.r.Read(p) - cr.n += int64(n) - return -} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go deleted file mode 100644 index f24afe32f4..0000000000 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go +++ /dev/null @@ -1,237 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - Copyright 2019 The Go Authors. All rights reserved. - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. -*/ - -package estargz - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "encoding/binary" - "encoding/json" - "fmt" - "hash" - "io" - "strconv" - - digest "github.com/opencontainers/go-digest" -) - -type gzipCompression struct { - *GzipCompressor - *GzipDecompressor -} - -func newGzipCompressionWithLevel(level int) Compression { - return &gzipCompression{ - &GzipCompressor{level}, - &GzipDecompressor{}, - } -} - -func NewGzipCompressor() *GzipCompressor { - return &GzipCompressor{gzip.BestCompression} -} - -func NewGzipCompressorWithLevel(level int) *GzipCompressor { - return &GzipCompressor{level} -} - -type GzipCompressor struct { - compressionLevel int -} - -func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) { - return gzip.NewWriterLevel(w, gc.compressionLevel) -} - -func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) { - tocJSON, err := json.MarshalIndent(toc, "", "\t") - if err != nil { - return "", err - } - gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel) - gw := io.Writer(gz) - if diffHash != nil { - gw = io.MultiWriter(gz, diffHash) - } - tw := tar.NewWriter(gw) - if err := tw.WriteHeader(&tar.Header{ - Typeflag: tar.TypeReg, - Name: TOCTarName, - Size: int64(len(tocJSON)), - }); err != nil { - return "", err - } - if _, err := tw.Write(tocJSON); err != nil { - return "", err - } - - if err := tw.Close(); err != nil { - return "", err - } - if err := gz.Close(); err != nil { - return "", err - } - if _, err := w.Write(gzipFooterBytes(off)); err != nil { - return "", err - } - return digest.FromBytes(tocJSON), nil -} - -// gzipFooterBytes returns the 51 bytes footer. 
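The footer is a small gzip stream whose RFC 1952 extra field carries the TOC offset as 16 hex digits followed by the magic string "STARGZ". A sketch of decoding that extra field, mirroring what GzipDecompressor.ParseFooter below does (parseStargzExtra is a hypothetical helper, not part of this package):

```go
package main

import (
	"fmt"
	"strconv"
)

// parseStargzExtra decodes the gzip extra field written by gzipFooterBytes:
// SI1='S', SI2='G', a little-endian subfield length, then "%016x" + "STARGZ".
func parseStargzExtra(extra []byte) (tocOffset int64, err error) {
	if len(extra) < 4+16+len("STARGZ") {
		return 0, fmt.Errorf("extra field too short: %d bytes", len(extra))
	}
	if extra[0] != 'S' || extra[1] != 'G' {
		return 0, fmt.Errorf("unexpected subfield IDs %q%q; want SG", extra[0], extra[1])
	}
	subfield := extra[4:]
	if string(subfield[16:16+len("STARGZ")]) != "STARGZ" {
		return 0, fmt.Errorf("STARGZ magic not found")
	}
	return strconv.ParseInt(string(subfield[:16]), 16, 64)
}

func main() {
	extra := append([]byte{'S', 'G', 22, 0}, []byte(fmt.Sprintf("%016xSTARGZ", 0x1234))...)
	off, err := parseStargzExtra(extra)
	fmt.Println(off, err) // 4660 <nil>
}
```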
-func gzipFooterBytes(tocOff int64) []byte { - buf := bytes.NewBuffer(make([]byte, 0, FooterSize)) - gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes - - // Extra header indicating the offset of TOCJSON - // https://tools.ietf.org/html/rfc1952#section-2.3.1.1 - header := make([]byte, 4) - header[0], header[1] = 'S', 'G' - subfield := fmt.Sprintf("%016xSTARGZ", tocOff) - binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952 - gz.Header.Extra = append(header, []byte(subfield)...) - gz.Close() - if buf.Len() != FooterSize { - panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize)) - } - return buf.Bytes() -} - -type GzipDecompressor struct{} - -func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) { - return gzip.NewReader(r) -} - -func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { - return parseTOCEStargz(r) -} - -func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) { - if len(p) != FooterSize { - return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p)) - } - zr, err := gzip.NewReader(bytes.NewReader(p)) - if err != nil { - return 0, 0, 0, err - } - defer zr.Close() - extra := zr.Header.Extra - si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:] - if si1 != 'S' || si2 != 'G' { - return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2) - } - if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) { - return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ")) - } - if string(subfield[16:]) != "STARGZ" { - return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield") - } - tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("failed to parse toc offset: %w", err) - } - return tocOffset, tocOffset, 0, nil -} - -func (gz *GzipDecompressor) FooterSize() int64 { - return FooterSize -} - -func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) { - return decompressTOCEStargz(r) -} - -type LegacyGzipDecompressor struct{} - -func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) { - return gzip.NewReader(r) -} - -func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { - return parseTOCEStargz(r) -} - -func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) { - if len(p) != legacyFooterSize { - return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p)) - } - zr, err := gzip.NewReader(bytes.NewReader(p)) - if err != nil { - return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err) - } - defer zr.Close() - extra := zr.Header.Extra - if len(extra) != 16+len("STARGZ") { - return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size") - } - if string(extra[16:]) != "STARGZ" { - return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found") - } - tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err) - } - return tocOffset, tocOffset, 0, nil -} - -func (gz *LegacyGzipDecompressor) FooterSize() int64 { - return legacyFooterSize -} - -func (gz
*LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) { - return decompressTOCEStargz(r) -} - -func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { - tr, err := decompressTOCEStargz(r) - if err != nil { - return nil, "", err - } - dgstr := digest.Canonical.Digester() - toc = new(JTOC) - if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil { - return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err) - } - if err := tr.Close(); err != nil { - return nil, "", err - } - return toc, dgstr.Digest(), nil -} - -func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) { - zr, err := gzip.NewReader(r) - if err != nil { - return nil, fmt.Errorf("malformed TOC gzip header: %v", err) - } - zr.Multistream(false) - tr := tar.NewReader(zr) - h, err := tr.Next() - if err != nil { - return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err) - } - if h.Name != TOCTarName { - return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName) - } - return readCloser{tr, zr.Close}, nil -} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go deleted file mode 100644 index 0ca6fd75f2..0000000000 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ /dev/null @@ -1,2366 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - Copyright 2019 The Go Authors. All rights reserved. - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. -*/ - -package estargz - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "crypto/sha256" - "encoding/json" - "errors" - "fmt" - "io" - "math/rand" - "os" - "path/filepath" - "reflect" - "sort" - "strings" - "testing" - "time" - - "github.com/containerd/stargz-snapshotter/estargz/errorutil" - "github.com/klauspost/compress/zstd" - digest "github.com/opencontainers/go-digest" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -// TestingController is Compression with some helper methods necessary for testing. -type TestingController interface { - Compression - TestStreams(t *testing.T, b []byte, streams []int64) - DiffIDOf(*testing.T, []byte) string - String() string -} - -// CompressionTestSuite tests that this pkg, with the given controllers, can build valid eStargz blobs and parse them. -func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) { - t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) - t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) }) - t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...)
}) -} - -type TestingControllerFactory func() TestingController - -const ( - uncompressedType int = iota - gzipType - zstdType -) - -var srcCompressions = []int{ - uncompressedType, - gzipType, - zstdType, -} - -var allowedPrefix = [4]string{"", "./", "/", "../"} - -// testBuild tests the resulting stargz blob built by this pkg has the same -// contents as the normal stargz blob. -func testBuild(t *testing.T, controllers ...TestingControllerFactory) { - tests := []struct { - name string - chunkSize int - minChunkSize []int - in []tarEntry - }{ - { - name: "regfiles and directories", - chunkSize: 4, - in: tarOf( - file("foo", "test1"), - dir("foo2/"), - file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})), - ), - }, - { - name: "empty files", - chunkSize: 4, - in: tarOf( - file("foo", "tttttt"), - file("foo_empty", ""), - file("foo2", "tttttt"), - file("foo_empty2", ""), - file("foo3", "tttttt"), - file("foo_empty3", ""), - file("foo4", "tttttt"), - file("foo_empty4", ""), - file("foo5", "tttttt"), - file("foo_empty5", ""), - file("foo6", "tttttt"), - ), - }, - { - name: "various files", - chunkSize: 4, - minChunkSize: []int{0, 64000}, - in: tarOf( - file("baz.txt", "bazbazbazbazbazbazbaz"), - file("foo1.txt", "a"), - file("bar/foo2.txt", "b"), - file("foo3.txt", "c"), - symlink("barlink", "test/bar.txt"), - dir("test/"), - dir("dev/"), - blockdev("dev/testblock", 3, 4), - fifo("dev/testfifo"), - chardev("dev/testchar1", 5, 6), - file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})), - dir("test2/"), - link("test2/bazlink", "baz.txt"), - chardev("dev/testchar2", 1, 2), - ), - }, - { - name: "no contents", - chunkSize: 4, - in: tarOf( - file("baz.txt", ""), - symlink("barlink", "test/bar.txt"), - dir("test/"), - dir("dev/"), - blockdev("dev/testblock", 3, 4), - fifo("dev/testfifo"), - chardev("dev/testchar1", 5, 6), - file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})), - dir("test2/"), - link("test2/bazlink", "baz.txt"), - chardev("dev/testchar2", 1, 2), - ), - }, - } - for _, tt := range tests { - if len(tt.minChunkSize) == 0 { - tt.minChunkSize = []int{0} - } - for _, srcCompression := range srcCompressions { - srcCompression := srcCompression - for _, newCL := range controllers { - newCL := newCL - for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { - srcTarFormat := srcTarFormat - for _, prefix := range allowedPrefix { - prefix := prefix - for _, minChunkSize := range tt.minChunkSize { - minChunkSize := minChunkSize - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) { - tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) - // Test divideEntries() - entries, err := sortEntries(tarBlob, nil, nil) // identical order - if err != nil { - t.Fatalf("failed to parse tar: %v", err) - } - var merged []*entry - for _, part := range divideEntries(entries, 4) { - merged = append(merged, part...) 
- } - if !reflect.DeepEqual(entries, merged) { - for _, e := range entries { - t.Logf("Original: %v", e.header) - } - for _, e := range merged { - t.Logf("Merged: %v", e.header) - } - t.Errorf("divided entries couldn't be merged") - return - } - - // Prepare sample data - cl1 := newCL() - wantBuf := new(bytes.Buffer) - sw := NewWriterWithCompressor(wantBuf, cl1) - sw.MinChunkSize = minChunkSize - sw.ChunkSize = tt.chunkSize - if err := sw.AppendTar(tarBlob); err != nil { - t.Fatalf("failed to append tar to want stargz: %v", err) - } - if _, err := sw.Close(); err != nil { - t.Fatalf("failed to prepare want stargz: %v", err) - } - wantData := wantBuf.Bytes() - want, err := Open(io.NewSectionReader( - bytes.NewReader(wantData), 0, int64(len(wantData))), - WithDecompressors(cl1), - ) - if err != nil { - t.Fatalf("failed to parse the want stargz: %v", err) - } - - // Prepare testing data - var opts []Option - if minChunkSize > 0 { - opts = append(opts, WithMinChunkSize(minChunkSize)) - } - cl2 := newCL() - rc, err := Build(compressBlob(t, tarBlob, srcCompression), - append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...) - if err != nil { - t.Fatalf("failed to build stargz: %v", err) - } - defer rc.Close() - gotBuf := new(bytes.Buffer) - if _, err := io.Copy(gotBuf, rc); err != nil { - t.Fatalf("failed to copy built stargz blob: %v", err) - } - gotData := gotBuf.Bytes() - got, err := Open(io.NewSectionReader( - bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), - WithDecompressors(cl2), - ) - if err != nil { - t.Fatalf("failed to parse the got stargz: %v", err) - } - - // Check DiffID is properly calculated - rc.Close() - diffID := rc.DiffID() - wantDiffID := cl2.DiffIDOf(t, gotData) - if diffID.String() != wantDiffID { - t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) - } - - // Compare as stargz - if !isSameVersion(t, cl1, wantData, cl2, gotData) { - t.Errorf("built stargz hasn't same json") - return - } - if !isSameEntries(t, want, got) { - t.Errorf("built stargz isn't same as the original") - return - } - - // Compare as tar.gz - if !isSameTarGz(t, cl1, wantData, cl2, gotData) { - t.Errorf("built stargz isn't same tar.gz") - return - } - }) - } - } - } - } - } - } -} - -func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { - aGz, err := cla.Reader(bytes.NewReader(a)) - if err != nil { - t.Fatalf("failed to read A") - } - defer aGz.Close() - bGz, err := clb.Reader(bytes.NewReader(b)) - if err != nil { - t.Fatalf("failed to read B") - } - defer bGz.Close() - - // Same as tar's Next() method but ignores landmarks and TOCJSON file - next := func(r *tar.Reader) (h *tar.Header, err error) { - for { - if h, err = r.Next(); err != nil { - return - } - if h.Name != PrefetchLandmark && - h.Name != NoPrefetchLandmark && - h.Name != TOCTarName { - return - } - } - } - - aTar := tar.NewReader(aGz) - bTar := tar.NewReader(bGz) - for { - // Fetch and parse next header. 
- aH, aErr := next(aTar) - bH, bErr := next(bTar) - if aErr != nil || bErr != nil { - if aErr == io.EOF && bErr == io.EOF { - break - } - t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr) - } - if !reflect.DeepEqual(aH, bH) { - t.Logf("different header (A = %v; B = %v)", aH, bH) - return false - - } - aFile, err := io.ReadAll(aTar) - if err != nil { - t.Fatal("failed to read tar payload of A") - } - bFile, err := io.ReadAll(bTar) - if err != nil { - t.Fatal("failed to read tar payload of B") - } - if !bytes.Equal(aFile, bFile) { - t.Logf("different tar payload (A = %q; B = %q)", string(aFile), string(bFile)) - return false - } - } - - return true -} - -func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { - aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla) - if err != nil { - t.Fatalf("failed to parse A: %v", err) - } - bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb) - if err != nil { - t.Fatalf("failed to parse B: %v", err) - } - t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC)) - t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC)) - return aJTOC.Version == bJTOC.Version -} - -func isSameEntries(t *testing.T, a, b *Reader) bool { - aroot, ok := a.Lookup("") - if !ok { - t.Fatalf("failed to get root of A") - } - broot, ok := b.Lookup("") - if !ok { - t.Fatalf("failed to get root of B") - } - aEntry := stargzEntry{aroot, a} - bEntry := stargzEntry{broot, b} - return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry) -} - -func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader { - buf := new(bytes.Buffer) - var w io.WriteCloser - var err error - if srcCompression == gzipType { - w = gzip.NewWriter(buf) - } else if srcCompression == zstdType { - w, err = zstd.NewWriter(buf) - if err != nil { - t.Fatalf("failed to init zstd writer: %v", err) - } - } else { - return src - } - src.Seek(0, io.SeekStart) - if _, err := io.Copy(w, src); err != nil { - t.Fatalf("failed to compress source") - } - if err := w.Close(); err != nil { - t.Fatalf("failed to finalize compressed source") - } - data := buf.Bytes() - return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data))) - -} - -type stargzEntry struct { - e *TOCEntry - r *Reader -} - -// contains checks if all child entries in "b" are also contained in "a". -// This function also checks if the files/chunks contain the same contents among "a" and "b". -func contains(t *testing.T, a, b stargzEntry) bool { - ae, ar := a.e, a.r - be, br := b.e, b.r - t.Logf("Comparing: %q vs %q", ae.Name, be.Name) - if !equalEntry(ae, be) { - t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be) - return false - } - if ae.Type == "dir" { - t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name, - allChildrenName(ae), allChildrenName(be)) - iscontain := true - ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool { - // Walk through all files on this stargz file. - - if aChild.Name == PrefetchLandmark || - aChild.Name == NoPrefetchLandmark { - return true // Ignore landmarks - } - - // Ignore a TOCEntry of "./" (formatted as "" by stargz lib) on root directory - // because this points to the root directory itself.
- if aChild.Name == "" && ae.Name == "" { - return true - } - - bChild, ok := be.LookupChild(aBaseName) - if !ok { - t.Logf("%q (base: %q): not found in b: %v", - ae.Name, aBaseName, allChildrenName(be)) - iscontain = false - return false - } - - childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r}) - if !childcontain { - t.Logf("%q != %q: non-equal dir", ae.Name, be.Name) - iscontain = false - return false - } - return true - }) - return iscontain - } else if ae.Type == "reg" { - af, err := ar.OpenFile(ae.Name) - if err != nil { - t.Fatalf("failed to open file %q on A: %v", ae.Name, err) - } - bf, err := br.OpenFile(be.Name) - if err != nil { - t.Fatalf("failed to open file %q on B: %v", be.Name, err) - } - - var nr int64 - for nr < ae.Size { - abytes, anext, aok := readOffset(t, af, nr, a) - bbytes, bnext, bok := readOffset(t, bf, nr, b) - if !aok && !bok { - break - } else if !(aok && bok) || anext != bnext { - t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v", - ae.Name, be.Name, nr, aok, bok, anext, bnext) - return false - } - nr = anext - if !bytes.Equal(abytes, bbytes) { - t.Logf("%q != %q: different contents %v vs %v", - ae.Name, be.Name, string(abytes), string(bbytes)) - return false - } - } - return true - } - - return true -} - -func allChildrenName(e *TOCEntry) (children []string) { - e.ForeachChild(func(baseName string, _ *TOCEntry) bool { - children = append(children, baseName) - return true - }) - return -} - -func equalEntry(a, b *TOCEntry) bool { - // Here, we selectively compare fileds that we are interested in. - return a.Name == b.Name && - a.Type == b.Type && - a.Size == b.Size && - a.ModTime3339 == b.ModTime3339 && - a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time - a.LinkName == b.LinkName && - a.Mode == b.Mode && - a.UID == b.UID && - a.GID == b.GID && - a.Uname == b.Uname && - a.Gname == b.Gname && - (a.Offset >= 0) == (b.Offset >= 0) && - (a.NextOffset() > 0) == (b.NextOffset() > 0) && - a.DevMajor == b.DevMajor && - a.DevMinor == b.DevMinor && - a.NumLink == b.NumLink && - reflect.DeepEqual(a.Xattrs, b.Xattrs) && - // chunk-related infomations aren't compared in this function. 
- // ChunkOffset int64 `json:"chunkOffset,omitempty"` - // ChunkSize int64 `json:"chunkSize,omitempty"` - // children map[string]*TOCEntry - a.Digest == b.Digest -} - -func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { - ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) - if !ok { - return nil, 0, false - } - data := make([]byte, ce.ChunkSize) - t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset()) - n, err := r.ReadAt(data, ce.ChunkOffset) - if err != nil { - t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v", - e.e.Name, ce.ChunkOffset, ce.ChunkSize, err) - } - if int64(n) != ce.ChunkSize { - t.Fatalf("unexpected copied data size %d; want %d", - n, ce.ChunkSize) - } - return data[:n], offset + ce.ChunkSize, true -} - -func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { - jtocData, err := json.Marshal(*tocJSON) - if err != nil { - t.Fatalf("failed to marshal TOC JSON: %v", err) - } - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil { - t.Fatalf("failed to read toc json blob: %v", err) - } - return buf.String() -} - -const chunkSize = 3 - -// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) -type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) - -// testDigestAndVerify runs specified checks against sample stargz blobs. -func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { - tests := []struct { - name string - tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) - checks []check - minChunkSize []int - }{ - { - name: "no-regfile", - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { - return tarOf( - dir("test/"), - ) - }, - checks: []check{ - checkStargzTOC, - checkVerifyTOC, - checkVerifyInvalidStargzFail(buildTar(t, tarOf( - dir("test2/"), // modified - ), allowedPrefix[0])), - }, - }, - { - name: "small-files", - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { - return tarOf( - regDigest(t, "baz.txt", "", dgstMap), - regDigest(t, "foo.txt", "a", dgstMap), - dir("test/"), - regDigest(t, "test/bar.txt", "bbb", dgstMap), - ) - }, - minChunkSize: []int{0, 64000}, - checks: []check{ - checkStargzTOC, - checkVerifyTOC, - checkVerifyInvalidStargzFail(buildTar(t, tarOf( - file("baz.txt", ""), - file("foo.txt", "M"), // modified - dir("test/"), - file("test/bar.txt", "bbb"), - ), allowedPrefix[0])), - // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO - checkVerifyBrokenContentFail("foo.txt"), - }, - }, - { - name: "big-files", - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { - return tarOf( - regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), - regDigest(t, "foo.txt", "a", dgstMap), - dir("test/"), - regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), - ) - }, - checks: []check{ - checkStargzTOC, - checkVerifyTOC, - checkVerifyInvalidStargzFail(buildTar(t, tarOf( - file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified - file("foo.txt", "a"), - dir("test/"), - file("test/bar.txt", "testbartestbar"), - ), allowedPrefix[0])), - checkVerifyInvalidTOCEntryFail("test/bar.txt"), - checkVerifyBrokenContentFail("test/bar.txt"), - }, - }, - { - name: "with-non-regfiles", - minChunkSize: []int{0, 64000}, - tarInit: 
func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
- return tarOf(
- regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
- regDigest(t, "foo.txt", "a", dgstMap),
- regDigest(t, "bar/foo2.txt", "b", dgstMap),
- regDigest(t, "foo3.txt", "c", dgstMap),
- symlink("barlink", "test/bar.txt"),
- dir("test/"),
- regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
- dir("test2/"),
- link("test2/bazlink", "baz.txt"),
- )
- },
- checks: []check{
- checkStargzTOC,
- checkVerifyTOC,
- checkVerifyInvalidStargzFail(buildTar(t, tarOf(
- file("baz.txt", "bazbazbazbazbazbazbaz"),
- file("foo.txt", "a"),
- file("bar/foo2.txt", "b"),
- file("foo3.txt", "c"),
- symlink("barlink", "test/bar.txt"),
- dir("test/"),
- file("test/bar.txt", "testbartestbar"),
- dir("test2/"),
- link("test2/bazlink", "foo.txt"), // modified
- ), allowedPrefix[0])),
- checkVerifyInvalidTOCEntryFail("test/bar.txt"),
- checkVerifyBrokenContentFail("test/bar.txt"),
- },
- },
- }
-
- for _, tt := range tests {
- if len(tt.minChunkSize) == 0 {
- tt.minChunkSize = []int{0}
- }
- for _, srcCompression := range srcCompressions {
- srcCompression := srcCompression
- for _, newCL := range controllers {
- newCL := newCL
- for _, prefix := range allowedPrefix {
- prefix := prefix
- for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
- srcTarFormat := srcTarFormat
- for _, minChunkSize := range tt.minChunkSize {
- minChunkSize := minChunkSize
- t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
- // Get the original tar file and chunk digests
- dgstMap := make(map[string]digest.Digest)
- tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
-
- cl := newCL()
- rc, err := Build(compressBlob(t, tarBlob, srcCompression),
- WithChunkSize(chunkSize), WithCompression(cl))
- if err != nil {
- t.Fatalf("failed to convert stargz: %v", err)
- }
- tocDigest := rc.TOCDigest()
- defer rc.Close()
- buf := new(bytes.Buffer)
- if _, err := io.Copy(buf, rc); err != nil {
- t.Fatalf("failed to copy built stargz blob: %v", err)
- }
- newStargz := buf.Bytes()
- // NoPrefetchLandmark is added during `Build`, which is expected behaviour.
- dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
-
- for _, check := range tt.checks {
- check(t, newStargz, tocDigest, dgstMap, cl, newCL)
- }
- })
- }
- }
- }
- }
- }
-}
-
-// checkStargzTOC checks that the TOC JSON of the passed stargz has the expected
-// digest and contains valid chunks. It walks all entries in the stargz and
-// checks that all chunk digests stored in the TOC JSON match the actual contents.
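The harness above keys every expected chunk digest in dgstMap by a "name-offset-size" string (the chunkID helper later in this file). A minimal standalone sketch of that keying scheme, assuming the go-digest module is available; chunkKey is a hypothetical name and omits the cleanEntryName normalization the real helper applies:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// chunkKey mirrors the "name-offset-size" scheme used by chunkID in this file.
func chunkKey(name string, offset, size int64) string {
	return fmt.Sprintf("%s-%d-%d", name, offset, size)
}

func main() {
	content := []byte("This is such a big file")
	const chunkSize = int64(4)
	dgstMap := make(map[string]digest.Digest)
	for off := int64(0); off < int64(len(content)); off += chunkSize {
		end := off + chunkSize
		if end > int64(len(content)) {
			end = int64(len(content))
		}
		// Each chunk is digested independently so any chunk can be verified on its own.
		dgstMap[chunkKey("foo/big.txt", off, end-off)] = digest.FromBytes(content[off:end])
	}
	fmt.Println(dgstMap)
}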
-func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
- sgz, err := Open(
- io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
- WithDecompressors(controller),
- )
- if err != nil {
- t.Errorf("failed to parse converted stargz: %v", err)
- return
- }
- digestMapTOC, err := listDigests(io.NewSectionReader(
- bytes.NewReader(sgzData), 0, int64(len(sgzData))),
- controller,
- )
- if err != nil {
- t.Fatalf("failed to list digest: %v", err)
- }
- found := make(map[string]bool)
- for id := range dgstMap {
- found[id] = false
- }
- zr, err := controller.Reader(bytes.NewReader(sgzData))
- if err != nil {
- t.Fatalf("failed to decompress converted stargz: %v", err)
- }
- defer zr.Close()
- tr := tar.NewReader(zr)
- for {
- h, err := tr.Next()
- if err != nil {
- if err != io.EOF {
- t.Errorf("failed to read tar entry: %v", err)
- return
- }
- break
- }
- if h.Name == TOCTarName {
- // Check the digest of the TOC JSON based on the actual contents.
- // The TOC JSON is guaranteed to exist in this archive because
- // Open succeeded.
- dgstr := digest.Canonical.Digester()
- if _, err := io.Copy(dgstr.Hash(), tr); err != nil {
- t.Fatalf("failed to calculate digest of TOC JSON: %v",
- err)
- }
- if dgstr.Digest() != tocDigest {
- t.Errorf("invalid TOC JSON %q; want %q", dgstr.Digest(), tocDigest)
- }
- continue
- }
- if _, ok := sgz.Lookup(h.Name); !ok {
- t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
- return
- }
- var n int64
- for n < h.Size {
- ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
- if !ok {
- t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
- h.Name, n)
- return
- }
-
- // Get the original digest to make sure the file contents are kept unchanged
- // from the original tar, during the whole conversion steps.
- id := chunkID(h.Name, n, ce.ChunkSize)
- want, ok := dgstMap[id]
- if !ok {
- t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v",
- h.Name, n, ce.ChunkSize, dgstMap)
- return
- }
- found[id] = true
-
- // Check the file contents
- dgstr := digest.Canonical.Digester()
- if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil {
- t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)",
- h.Name, n, ce.ChunkSize)
- }
- if want != dgstr.Digest() {
- t.Errorf("Invalid contents in converted stargz %q: %q; want %q",
- h.Name, dgstr.Digest(), want)
- return
- }
-
- // Check the digest stored in the TOC JSON
- dgstTOC, ok := digestMapTOC[ce.Offset]
- if !ok {
- t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered",
- h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset)
- }
- if want != dgstTOC {
- t.Errorf("Invalid digest in TOCEntry %q: %q; want %q",
- h.Name, dgstTOC, want)
- return
- }
-
- n += ce.ChunkSize
- }
- }
-
- for id, ok := range found {
- if !ok {
- t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
- }
- }
-}
-
-// checkVerifyTOC checks that verification works for the TOC JSON of the passed
-// stargz. It walks all entries in the stargz and checks that verification
-// works for all chunks.
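A minimal sketch of the consumer-side flow this check exercises, built only from calls that appear elsewhere in this file (Open, VerifyTOC, OpenFile, ChunkEntryForOffset, Verifier); the package name, function name, and error handling here are illustrative, not part of the estargz API:

package sketch

import (
	"bytes"
	"fmt"
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
	digest "github.com/opencontainers/go-digest"
)

// verifyFileChunks checks every chunk of one file against the verified TOC.
func verifyFileChunks(sgzData []byte, tocDigest digest.Digest, name string, size int64) (bool, error) {
	sgz, err := estargz.Open(io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))))
	if err != nil {
		return false, err
	}
	ev, err := sgz.VerifyTOC(tocDigest) // fails fast on a tampered TOC
	if err != nil {
		return false, err
	}
	f, err := sgz.OpenFile(name)
	if err != nil {
		return false, err
	}
	for off := int64(0); off < size; {
		ce, ok := sgz.ChunkEntryForOffset(name, off)
		if !ok {
			return false, fmt.Errorf("no chunk at offset %d", off)
		}
		v, err := ev.Verifier(ce) // digest.Verifier scoped to this chunk
		if err != nil {
			return false, err
		}
		if _, err := io.Copy(v, io.NewSectionReader(f, ce.ChunkOffset, ce.ChunkSize)); err != nil {
			return false, err
		}
		if !v.Verified() {
			return false, nil // chunk bytes disagree with the TOC digest
		}
		off = ce.ChunkOffset + ce.ChunkSize
	}
	return true, nil
}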
-func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { - sgz, err := Open( - io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), - WithDecompressors(controller), - ) - if err != nil { - t.Errorf("failed to parse converted stargz: %v", err) - return - } - ev, err := sgz.VerifyTOC(tocDigest) - if err != nil { - t.Errorf("failed to verify stargz: %v", err) - return - } - - found := make(map[string]bool) - for id := range dgstMap { - found[id] = false - } - zr, err := controller.Reader(bytes.NewReader(sgzData)) - if err != nil { - t.Fatalf("failed to decompress converted stargz: %v", err) - } - defer zr.Close() - tr := tar.NewReader(zr) - for { - h, err := tr.Next() - if err != nil { - if err != io.EOF { - t.Errorf("failed to read tar entry: %v", err) - return - } - break - } - if h.Name == TOCTarName { - continue - } - if _, ok := sgz.Lookup(h.Name); !ok { - t.Errorf("lost stargz entry %q in the converted TOC", h.Name) - return - } - var n int64 - for n < h.Size { - ce, ok := sgz.ChunkEntryForOffset(h.Name, n) - if !ok { - t.Errorf("lost chunk %q(offset=%d) in the converted TOC", - h.Name, n) - return - } - - v, err := ev.Verifier(ce) - if err != nil { - t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n) - } - - found[chunkID(h.Name, n, ce.ChunkSize)] = true - - // Check the file contents - if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil { - t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)", - h.Name, n, ce.ChunkSize) - } - if !v.Verified() { - t.Errorf("Invalid contents in converted stargz %q (should be succeeded)", - h.Name) - return - } - n += ce.ChunkSize - } - } - - for id, ok := range found { - if !ok { - t.Errorf("required chunk %q not found in the converted stargz: %v", id, found) - } - } -} - -// checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be -// detected during the verification and the verification returns an error. 
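The reason the rewrites below must be caught is that the TOC digest is computed over the serialized JSON, so any field change produces a different canonical digest. A self-contained sketch with an illustrative (not real) TOC snippet:

package main

import (
	"bytes"
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	orig := []byte(`{"version":1,"entries":[{"name":"foo.txt","type":"reg","chunkDigest":"sha256:aaaa"}]}`)
	// Drop the chunkDigest, as the "lost digest in a entry" rewrite below does.
	mod := bytes.Replace(orig, []byte(`"sha256:aaaa"`), []byte(`""`), 1)
	// The digests differ, so VerifyTOC against the original digest must fail.
	fmt.Println(digest.FromBytes(orig) == digest.FromBytes(mod)) // false
}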
-func checkVerifyInvalidTOCEntryFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { - funcs := map[string]rewriteFunc{ - "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { - var found bool - for _, e := range toc.Entries { - if cleanEntryName(e.Name) == filename { - if e.Type != "reg" && e.Type != "chunk" { - t.Fatalf("entry %q to break must be regfile or chunk", filename) - } - if e.ChunkDigest == "" { - t.Fatalf("entry %q is already invalid", filename) - } - e.ChunkDigest = "" - found = true - } - } - if !found { - t.Fatalf("rewrite target not found") - } - }, - "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { - var ( - sampleEntry *TOCEntry - targetEntry *TOCEntry - ) - for _, e := range toc.Entries { - if e.Type == "reg" || e.Type == "chunk" { - if cleanEntryName(e.Name) == filename { - targetEntry = e - } else { - sampleEntry = e - } - } - } - if sampleEntry == nil { - t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target") - } - if targetEntry == nil { - t.Fatalf("rewrite target not found") - } - targetEntry.Offset = sampleEntry.Offset - }, - } - - for name, rFunc := range funcs { - t.Run(name, func(t *testing.T) { - newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller) - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, newSgz); err != nil { - t.Fatalf("failed to get converted stargz") - } - isgz := buf.Bytes() - - sgz, err := Open( - io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))), - WithDecompressors(controller), - ) - if err != nil { - t.Fatalf("failed to parse converted stargz: %v", err) - return - } - _, err = sgz.VerifyTOC(newTocDigest) - if err == nil { - t.Errorf("must fail for invalid TOC") - return - } - }) - } - } -} - -// checkVerifyInvalidStargzFail checks if the verification detects that the -// given stargz file doesn't match to the expected digest and returns error. -func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { - cl := newController() - rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl)) - if err != nil { - t.Fatalf("failed to convert stargz: %v", err) - } - defer rc.Close() - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, rc); err != nil { - t.Fatalf("failed to copy built stargz blob: %v", err) - } - mStargz := buf.Bytes() - - sgz, err := Open( - io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))), - WithDecompressors(cl), - ) - if err != nil { - t.Fatalf("failed to parse converted stargz: %v", err) - return - } - _, err = sgz.VerifyTOC(tocDigest) - if err == nil { - t.Errorf("must fail for invalid TOC") - return - } - } -} - -// checkVerifyBrokenContentFail checks if the verifier detects broken contents -// that doesn't match to the expected digest and returns error. 
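The byte-flip trick the next check relies on can be shown in isolation: a digest.Verifier fed data whose first byte was bit-inverted must report Verified() == false. A minimal sketch using only the go-digest API:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	data := []byte("testbartestbar")
	d := digest.FromBytes(data)
	v := d.Verifier()
	// Invert the first byte, as checkVerifyBrokenContentFail does below.
	broken := append([]byte{^data[0]}, data[1:]...)
	if _, err := v.Write(broken); err != nil {
		panic(err)
	}
	fmt.Println(v.Verified()) // false
}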
-func checkVerifyBrokenContentFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { - // Parse stargz file - sgz, err := Open( - io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), - WithDecompressors(controller), - ) - if err != nil { - t.Fatalf("failed to parse converted stargz: %v", err) - return - } - ev, err := sgz.VerifyTOC(tocDigest) - if err != nil { - t.Fatalf("failed to verify stargz: %v", err) - return - } - - // Open the target file - sr, err := sgz.OpenFile(filename) - if err != nil { - t.Fatalf("failed to open file %q", filename) - } - ce, ok := sgz.ChunkEntryForOffset(filename, 0) - if !ok { - t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0) - return - } - if ce.ChunkSize == 0 { - t.Fatalf("file mustn't be empty") - return - } - data := make([]byte, ce.ChunkSize) - if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil { - t.Errorf("failed to get data of a chunk of %q(offset=%q)", - filename, ce.ChunkOffset) - } - - // Check the broken chunk (must fail) - v, err := ev.Verifier(ce) - if err != nil { - t.Fatalf("failed to get verifier for %q", filename) - } - broken := append([]byte{^data[0]}, data[1:]...) - if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil { - t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)", - filename, ce.ChunkOffset, ce.ChunkSize) - } - if v.Verified() { - t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)", - filename, data, broken) - } - } -} - -func chunkID(name string, offset, size int64) string { - return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size) -} - -type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader) - -func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) { - decodedJTOC, jtocOffset, err := parseStargz(sgz, controller) - if err != nil { - t.Fatalf("failed to extract TOC JSON: %v", err) - } - - rewrite(t, decodedJTOC, sgz) - - tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset) - if err != nil { - t.Fatalf("failed to create toc and footer: %v", err) - } - - // Reconstruct stargz file with the modified TOC JSON - if _, err := sgz.Seek(0, io.SeekStart); err != nil { - t.Fatalf("failed to reset the seek position of stargz: %v", err) - } - return io.MultiReader( - io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON) - tocFooter, // Rewritten TOC and footer - ), tocDigest -} - -func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) { - decodedJTOC, _, err := parseStargz(sgz, controller) - if err != nil { - return nil, err - } - digestMap := make(map[int64]digest.Digest) - for _, e := range decodedJTOC.Entries { - if e.Type == "reg" || e.Type == "chunk" { - if e.Type == "reg" && e.Size == 0 { - continue // ignores empty file - } - if e.ChunkDigest == "" { - return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON", - e.Name, e.Offset) - } - d, err := digest.Parse(e.ChunkDigest) - if err != nil { - return nil, err - } - digestMap[e.Offset] = d - } - } - return digestMap, nil -} - -func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) { - fSize := controller.FooterSize() - footer := make([]byte, fSize) - if 
_, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil { - return nil, 0, fmt.Errorf("error reading footer: %w", err) - } - _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) - if err != nil { - return nil, 0, fmt.Errorf("failed to parse footer: %w", err) - } - - // Decode the TOC JSON - var tocReader io.Reader - if tocOffset >= 0 { - tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) - } - decodedJTOC, _, err = controller.ParseTOC(tocReader) - if err != nil { - return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) - } - return decodedJTOC, tocOffset, nil -} - -func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { - const content = "Some contents" - invalidUtf8 := "\xff\xfe\xfd" - - xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} - sampleOwner := owner{uid: 50, gid: 100} - - data64KB := randomContents(64000) - - tests := []struct { - name string - chunkSize int - minChunkSize int - in []tarEntry - want []stargzCheck - wantNumGz int // expected number of streams - - wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz - wantFailOnLossLess bool - wantTOCVersion int // default = 1 - }{ - { - name: "empty", - in: tarOf(), - wantNumGz: 2, // (empty tar) + TOC + footer - want: checks( - numTOCEntries(0), - ), - }, - { - name: "1dir_1empty_file", - in: tarOf( - dir("foo/"), - file("foo/bar.txt", ""), - ), - wantNumGz: 3, // dir, TOC, footer - want: checks( - numTOCEntries(2), - hasDir("foo/"), - hasFileLen("foo/bar.txt", 0), - entryHasChildren("foo", "bar.txt"), - hasFileDigest("foo/bar.txt", digestFor("")), - ), - }, - { - name: "1dir_1file", - in: tarOf( - dir("foo/"), - file("foo/bar.txt", content, xAttrFile), - ), - wantNumGz: 4, // var dir, foo.txt alone, TOC, footer - want: checks( - numTOCEntries(2), - hasDir("foo/"), - hasFileLen("foo/bar.txt", len(content)), - hasFileDigest("foo/bar.txt", digestFor(content)), - hasFileContentsRange("foo/bar.txt", 0, content), - hasFileContentsRange("foo/bar.txt", 1, content[1:]), - entryHasChildren("", "foo"), - entryHasChildren("foo", "bar.txt"), - hasFileXattrs("foo/bar.txt", "foo", "bar"), - hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8), - ), - }, - { - name: "2meta_2file", - in: tarOf( - dir("bar/", sampleOwner), - dir("foo/", sampleOwner), - file("foo/bar.txt", content, sampleOwner), - ), - wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer - want: checks( - numTOCEntries(3), - hasDir("bar/"), - hasDir("foo/"), - hasFileLen("foo/bar.txt", len(content)), - entryHasChildren("", "bar", "foo"), - entryHasChildren("foo", "bar.txt"), - hasChunkEntries("foo/bar.txt", 1), - hasEntryOwner("bar/", sampleOwner), - hasEntryOwner("foo/", sampleOwner), - hasEntryOwner("foo/bar.txt", sampleOwner), - ), - }, - { - name: "3dir", - in: tarOf( - dir("bar/"), - dir("foo/"), - dir("foo/bar/"), - ), - wantNumGz: 3, // 3 dirs, TOC, footer - want: checks( - hasDirLinkCount("bar/", 2), - hasDirLinkCount("foo/", 3), - hasDirLinkCount("foo/bar/", 2), - ), - }, - { - name: "symlink", - in: tarOf( - dir("foo/"), - symlink("foo/bar", "../../x"), - ), - wantNumGz: 3, // metas + TOC + footer - want: checks( - numTOCEntries(2), - hasSymlink("foo/bar", "../../x"), - entryHasChildren("", "foo"), - entryHasChildren("foo", "bar"), - ), - }, - { - name: "chunked_file", - chunkSize: 4, - in: tarOf( - dir("foo/"), - file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), - ), - wantNumGz: 9, // dir + 
big.txt(6 chunks) + TOC + footer - want: checks( - numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file - hasDir("foo/"), - hasFileLen("foo/big.txt", len("This is such a big file")), - hasFileDigest("foo/big.txt", digestFor("This is such a big file")), - hasFileContentsRange("foo/big.txt", 0, "This is such a big file"), - hasFileContentsRange("foo/big.txt", 1, "his is such a big file"), - hasFileContentsRange("foo/big.txt", 2, "is is such a big file"), - hasFileContentsRange("foo/big.txt", 3, "s is such a big file"), - hasFileContentsRange("foo/big.txt", 4, " is such a big file"), - hasFileContentsRange("foo/big.txt", 5, "is such a big file"), - hasFileContentsRange("foo/big.txt", 6, "s such a big file"), - hasFileContentsRange("foo/big.txt", 7, " such a big file"), - hasFileContentsRange("foo/big.txt", 8, "such a big file"), - hasFileContentsRange("foo/big.txt", 9, "uch a big file"), - hasFileContentsRange("foo/big.txt", 10, "ch a big file"), - hasFileContentsRange("foo/big.txt", 11, "h a big file"), - hasFileContentsRange("foo/big.txt", 12, " a big file"), - hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""), - hasChunkEntries("foo/big.txt", 6), - ), - }, - { - name: "recursive", - in: tarOf( - dir("/", sampleOwner), - dir("bar/", sampleOwner), - dir("foo/", sampleOwner), - file("foo/bar.txt", content, sampleOwner), - ), - wantNumGz: 4, // dirs, bar.txt alone, TOC, footer - want: checks( - maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt" - ), - }, - { - name: "block_char_fifo", - in: tarOf( - tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { - return w.WriteHeader(&tar.Header{ - Name: prefix + "b", - Typeflag: tar.TypeBlock, - Devmajor: 123, - Devminor: 456, - Format: format, - }) - }), - tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { - return w.WriteHeader(&tar.Header{ - Name: prefix + "c", - Typeflag: tar.TypeChar, - Devmajor: 111, - Devminor: 222, - Format: format, - }) - }), - tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { - return w.WriteHeader(&tar.Header{ - Name: prefix + "f", - Typeflag: tar.TypeFifo, - Format: format, - }) - }), - ), - wantNumGz: 3, - want: checks( - lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}), - lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}), - lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}), - ), - }, - { - name: "modes", - in: tarOf( - dir("foo1/", 0755|os.ModeDir|os.ModeSetgid), - file("foo1/bar1", content, 0700|os.ModeSetuid), - file("foo1/bar2", content, 0755|os.ModeSetgid), - dir("foo2/", 0755|os.ModeDir|os.ModeSticky), - file("foo2/bar3", content, 0755|os.ModeSticky), - dir("foo3/", 0755|os.ModeDir), - file("foo3/bar4", content, os.FileMode(0700)), - file("foo3/bar5", content, os.FileMode(0755)), - ), - wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer - want: checks( - hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid), - hasMode("foo1/bar1", 0700|os.ModeSetuid), - hasMode("foo1/bar2", 0755|os.ModeSetgid), - hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky), - hasMode("foo2/bar3", 0755|os.ModeSticky), - hasMode("foo3/", 0755|os.ModeDir), - hasMode("foo3/bar4", os.FileMode(0700)), - hasMode("foo3/bar5", os.FileMode(0755)), - ), - }, - { - name: "lossy", - in: tarOf( - dir("bar/", sampleOwner), - dir("foo/", sampleOwner), - file("foo/bar.txt", content, sampleOwner), - 
file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error) - ), - wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer - want: checks( - numTOCEntries(3), - hasDir("bar/"), - hasDir("foo/"), - hasFileLen("foo/bar.txt", len(content)), - entryHasChildren("", "bar", "foo"), - entryHasChildren("foo", "bar.txt"), - hasChunkEntries("foo/bar.txt", 1), - hasEntryOwner("bar/", sampleOwner), - hasEntryOwner("foo/", sampleOwner), - hasEntryOwner("foo/bar.txt", sampleOwner), - ), - wantFailOnLossLess: true, - }, - { - name: "hardlink should be replaced to the destination entry", - in: tarOf( - dir("foo/"), - file("foo/foo1", "test"), - link("foolink", "foo/foo1"), - ), - wantNumGz: 4, // dir, foo1 + link, TOC, footer - want: checks( - mustSameEntry("foo/foo1", "foolink"), - ), - }, - { - name: "several_files_in_chunk", - minChunkSize: 8000, - in: tarOf( - dir("foo/"), - file("foo/foo1", data64KB), - file("foo2", "bb"), - file("foo22", "ccc"), - dir("bar/"), - file("bar/bar.txt", "aaa"), - file("foo3", data64KB), - ), - // NOTE: we assume that the compressed "data64KB" is still larger than 8KB - wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer - want: checks( - numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3 - hasDir("foo/"), - hasDir("bar/"), - hasFileLen("foo/foo1", len(data64KB)), - hasFileLen("foo2", len("bb")), - hasFileLen("foo22", len("ccc")), - hasFileLen("bar/bar.txt", len("aaa")), - hasFileLen("foo3", len(data64KB)), - hasFileDigest("foo/foo1", digestFor(data64KB)), - hasFileDigest("foo2", digestFor("bb")), - hasFileDigest("foo22", digestFor("ccc")), - hasFileDigest("bar/bar.txt", digestFor("aaa")), - hasFileDigest("foo3", digestFor(data64KB)), - hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}), - hasFileContentsRange("foo/foo1", 0, data64KB), - hasFileContentsRange("foo2", 0, "bb"), - hasFileContentsRange("foo2", 1, "b"), - hasFileContentsRange("foo22", 0, "ccc"), - hasFileContentsRange("foo22", 1, "cc"), - hasFileContentsRange("foo22", 2, "c"), - hasFileContentsRange("bar/bar.txt", 0, "aaa"), - hasFileContentsRange("bar/bar.txt", 1, "aa"), - hasFileContentsRange("bar/bar.txt", 2, "a"), - hasFileContentsRange("foo3", 0, data64KB), - hasFileContentsRange("foo3", 1, data64KB[1:]), - hasFileContentsRange("foo3", 2, data64KB[2:]), - hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), - hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), - ), - }, - { - name: "several_files_in_chunk_chunked", - minChunkSize: 8000, - chunkSize: 32000, - in: tarOf( - dir("foo/"), - file("foo/foo1", data64KB), - file("foo2", "bb"), - dir("bar/"), - file("foo3", data64KB), - ), - // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB - wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer - want: checks( - numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks) - hasDir("foo/"), - hasDir("bar/"), - hasFileLen("foo/foo1", len(data64KB)), - hasFileLen("foo2", len("bb")), - hasFileLen("foo3", len(data64KB)), - hasFileDigest("foo/foo1", digestFor(data64KB)), - hasFileDigest("foo2", digestFor("bb")), - hasFileDigest("foo3", digestFor(data64KB)), - hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}), - hasFileContentsRange("foo/foo1", 0, data64KB), - hasFileContentsRange("foo/foo1", 1, data64KB[1:]), - hasFileContentsRange("foo/foo1", 2, 
data64KB[2:]), - hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]), - hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]), - hasFileContentsRange("foo2", 0, "bb"), - hasFileContentsRange("foo2", 1, "b"), - hasFileContentsRange("foo3", 0, data64KB), - hasFileContentsRange("foo3", 1, data64KB[1:]), - hasFileContentsRange("foo3", 2, data64KB[2:]), - hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), - hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), - ), - }, - } - - for _, tt := range tests { - for _, newCL := range controllers { - newCL := newCL - for _, prefix := range allowedPrefix { - prefix := prefix - for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { - srcTarFormat := srcTarFormat - for _, lossless := range []bool{true, false} { - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) { - var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) - origTarDgstr := digest.Canonical.Digester() - tr = io.TeeReader(tr, origTarDgstr.Hash()) - var stargzBuf bytes.Buffer - cl1 := newCL() - w := NewWriterWithCompressor(&stargzBuf, cl1) - w.ChunkSize = tt.chunkSize - w.MinChunkSize = tt.minChunkSize - if lossless { - err := w.AppendTarLossLess(tr) - if tt.wantFailOnLossLess { - if err != nil { - return // expected to fail - } - t.Fatalf("Append wanted to fail on lossless") - } - if err != nil { - t.Fatalf("Append(lossless): %v", err) - } - } else { - if err := w.AppendTar(tr); err != nil { - t.Fatalf("Append: %v", err) - } - } - if _, err := w.Close(); err != nil { - t.Fatalf("Writer.Close: %v", err) - } - b := stargzBuf.Bytes() - - if lossless { - // Check if the result blob reserves original tar metadata - rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1) - if err != nil { - t.Errorf("failed to decompress blob: %v", err) - return - } - defer rc.Close() - resultDgstr := digest.Canonical.Digester() - if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil { - t.Errorf("failed to read result decompressed blob: %v", err) - return - } - if resultDgstr.Digest() != origTarDgstr.Digest() { - t.Errorf("lossy compression occurred: digest=%v; want %v", - resultDgstr.Digest(), origTarDgstr.Digest()) - return - } - } - - diffID := w.DiffID() - wantDiffID := cl1.DiffIDOf(t, b) - if diffID != wantDiffID { - t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) - } - - telemetry, checkCalled := newCalledTelemetry() - sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))) - r, err := Open( - sr, - WithDecompressors(cl1), - WithTelemetry(telemetry), - ) - if err != nil { - t.Fatalf("stargz.Open: %v", err) - } - wantTOCVersion := 1 - if tt.wantTOCVersion > 0 { - wantTOCVersion = tt.wantTOCVersion - } - if r.toc.Version != wantTOCVersion { - t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion) - } - - footerSize := cl1.FooterSize() - footerOffset := sr.Size() - footerSize - footer := make([]byte, footerSize) - if _, err := sr.ReadAt(footer, footerOffset); err != nil { - t.Errorf("failed to read footer: %v", err) - } - _, tocOffset, _, err := cl1.ParseFooter(footer) - if err != nil { - t.Errorf("failed to parse footer: %v", err) - } - if err := checkCalled(tocOffset >= 0); err != nil { - t.Errorf("telemetry failure: %v", err) - } - - wantNumGz := tt.wantNumGz - if lossless && tt.wantNumGzLossLess > 0 { - wantNumGz = 
tt.wantNumGzLossLess - } - streamOffsets := []int64{0} - prevOffset := int64(-1) - streams := 0 - for _, e := range r.toc.Entries { - if e.Offset > prevOffset { - streamOffsets = append(streamOffsets, e.Offset) - prevOffset = e.Offset - streams++ - } - } - streams++ // TOC - if tocOffset >= 0 { - // toc is in the blob - streamOffsets = append(streamOffsets, tocOffset) - } - streams++ // footer - streamOffsets = append(streamOffsets, footerOffset) - if streams != wantNumGz { - t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz) - } - - t.Logf("testing streams: %+v", streamOffsets) - cl1.TestStreams(t, b, streamOffsets) - - for _, want := range tt.want { - want.check(t, r) - } - }) - } - } - } - } - } -} - -type chunkInfo struct { - name string - data string -} - -func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) { - var getFooterLatencyCalled bool - var getTocLatencyCalled bool - var deserializeTocLatencyCalled bool - return &Telemetry{ - func(time.Time) { getFooterLatencyCalled = true }, - func(time.Time) { getTocLatencyCalled = true }, - func(time.Time) { deserializeTocLatencyCalled = true }, - }, func(needsGetTOC bool) error { - var allErr []error - if !getFooterLatencyCalled { - allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) - } - if needsGetTOC { - if !getTocLatencyCalled { - allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) - } - } - if !deserializeTocLatencyCalled { - allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) - } - return errorutil.Aggregate(allErr) - } -} - -func digestFor(content string) string { - sum := sha256.Sum256([]byte(content)) - return fmt.Sprintf("sha256:%x", sum) -} - -type numTOCEntries int - -func (n numTOCEntries) check(t *testing.T, r *Reader) { - if r.toc == nil { - t.Fatal("nil TOC") - } - if got, want := len(r.toc.Entries), int(n); got != want { - t.Errorf("got %d TOC entries; want %d", got, want) - } - t.Logf("got TOC entries:") - for i, ent := range r.toc.Entries { - entj, _ := json.Marshal(ent) - t.Logf(" [%d]: %s\n", i, entj) - } - if t.Failed() { - t.FailNow() - } -} - -func checks(s ...stargzCheck) []stargzCheck { return s } - -type stargzCheck interface { - check(t *testing.T, r *Reader) -} - -type stargzCheckFn func(*testing.T, *Reader) - -func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } - -func maxDepth(max int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - e, ok := r.Lookup("") - if !ok { - t.Fatal("root directory not found") - } - d, err := getMaxDepth(t, e, 0, 10*max) - if err != nil { - t.Errorf("failed to get max depth (wanted %d): %v", max, err) - return - } - if d != max { - t.Errorf("invalid depth %d; want %d", d, max) - return - } - }) -} - -func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { - if current > limit { - return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", - current, limit) - } - max = current - e.ForeachChild(func(baseName string, ent *TOCEntry) bool { - t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name) - d, err := getMaxDepth(t, ent, current+1, limit) - if err != nil { - rErr = err - return false - } - if d > max { - max = d - } - return true - }) - return -} - -func hasFileLen(file string, wantLen int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - for _, ent := range r.toc.Entries { - if ent.Name == file { - if ent.Type != "reg" { - 
t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) - } else if ent.Size != int64(wantLen) { - t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen) - } - return - } - } - t.Errorf("file %q not found", file) - }) -} - -func hasFileXattrs(file, name, value string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - for _, ent := range r.toc.Entries { - if ent.Name == file { - if ent.Type != "reg" { - t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) - } - if ent.Xattrs == nil { - t.Errorf("file %q has no xattrs", file) - return - } - valueFound, found := ent.Xattrs[name] - if !found { - t.Errorf("file %q has no xattr %q", file, name) - return - } - if string(valueFound) != value { - t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value) - } - - return - } - } - t.Errorf("file %q not found", file) - }) -} - -func hasFileDigest(file string, digest string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - ent, ok := r.Lookup(file) - if !ok { - t.Fatalf("didn't find TOCEntry for file %q", file) - } - if ent.Digest != digest { - t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest) - } - }) -} - -func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - extraMap := make(map[string]chunkInfo) - for _, e := range extra { - extraMap[e.name] = e - } - var extraNames []string - for n := range extraMap { - extraNames = append(extraNames, n) - } - f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error { - t.Logf("On %q: got preread of %q", file, e.Name) - ex, ok := extraMap[e.Name] - if !ok { - t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames) - } - got, err := io.ReadAll(cr) - if err != nil { - t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err) - } - if ex.data != string(got) { - t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data)) - } - delete(extraMap, e.Name) - return nil - }) - if err != nil { - t.Fatal(err) - } - got := make([]byte, len(want)) - n, err := f.ReadAt(got, int64(offset)) - if err != nil { - t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err) - } - if string(got) != want { - t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) - } - if len(extraMap) != 0 { - var exNames []string - for _, ex := range extraMap { - exNames = append(exNames, ex.name) - } - t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames) - } - }) -} - -func hasFileContentsRange(file string, offset int, want string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - f, err := r.OpenFile(file) - if err != nil { - t.Fatal(err) - } - got := make([]byte, len(want)) - n, err := f.ReadAt(got, int64(offset)) - if err != nil { - t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) - } - if string(got) != want { - t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) - } - }) -} - -func hasChunkEntries(file string, wantChunks int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - ent, ok := r.Lookup(file) - if !ok { - t.Fatalf("no file for %q", file) - } - if ent.Type != "reg" { - t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type) 
- } - chunks := r.getChunks(ent) - if len(chunks) != wantChunks { - t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks) - return - } - f := chunks[0] - - var gotChunks []*TOCEntry - var last *TOCEntry - for off := int64(0); off < f.Size; off++ { - e, ok := r.ChunkEntryForOffset(file, off) - if !ok { - t.Errorf("no ChunkEntryForOffset at %d", off) - return - } - if last != e { - gotChunks = append(gotChunks, e) - last = e - } - } - if !reflect.DeepEqual(chunks, gotChunks) { - t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks) - } - - // And verify the NextOffset - for i := 0; i < len(gotChunks)-1; i++ { - ci := gotChunks[i] - cnext := gotChunks[i+1] - if ci.NextOffset() != cnext.Offset { - t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset) - } - } - }) -} - -func entryHasChildren(dir string, want ...string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - want := append([]string(nil), want...) - var got []string - ent, ok := r.Lookup(dir) - if !ok { - t.Fatalf("didn't find TOCEntry for dir node %q", dir) - } - for baseName := range ent.children { - got = append(got, baseName) - } - sort.Strings(got) - sort.Strings(want) - if !reflect.DeepEqual(got, want) { - t.Errorf("children of %q = %q; want %q", dir, got, want) - } - }) -} - -func hasDir(file string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - for _, ent := range r.toc.Entries { - if ent.Name == cleanEntryName(file) { - if ent.Type != "dir" { - t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) - } - return - } - } - t.Errorf("directory %q not found", file) - }) -} - -func hasDirLinkCount(file string, count int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - for _, ent := range r.toc.Entries { - if ent.Name == cleanEntryName(file) { - if ent.Type != "dir" { - t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) - return - } - if ent.NumLink != count { - t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count) - } - return - } - } - t.Errorf("directory %q not found", file) - }) -} - -func hasMode(file string, mode os.FileMode) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - for _, ent := range r.toc.Entries { - if ent.Name == cleanEntryName(file) { - if ent.Stat().Mode() != mode { - t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode) - return - } - return - } - } - t.Errorf("file %q not found", file) - }) -} - -func hasSymlink(file, target string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - for _, ent := range r.toc.Entries { - if ent.Name == file { - if ent.Type != "symlink" { - t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type) - } else if ent.LinkName != target { - t.Errorf("link target of symlink %q is %q; want %q", file, ent.LinkName, target) - } - return - } - } - t.Errorf("symlink %q not found", file) - }) -} - -func lookupMatch(name string, want *TOCEntry) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - e, ok := r.Lookup(name) - if !ok { - t.Fatalf("failed to Lookup entry %q", name) - } - if !reflect.DeepEqual(e, want) { - t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want) - } - - }) -} - -func hasEntryOwner(entry string, owner owner) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - ent, ok := r.Lookup(strings.TrimSuffix(entry, "/")) - if !ok { - t.Errorf("entry %q not found", 
entry) - return - } - if ent.UID != owner.uid || ent.GID != owner.gid { - t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid) - return - } - }) -} - -func mustSameEntry(files ...string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { - var first *TOCEntry - for _, f := range files { - if first == nil { - var ok bool - first, ok = r.Lookup(f) - if !ok { - t.Errorf("unknown first file on Lookup: %q", f) - return - } - } - - // Test Lookup - e, ok := r.Lookup(f) - if !ok { - t.Errorf("unknown file on Lookup: %q", f) - return - } - if e != first { - t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first) - return - } - - // Test LookupChild - pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f))) - if !ok { - t.Errorf("failed to get parent of %q", f) - return - } - e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f))) - if !ok { - t.Errorf("failed to get %q as the child of %+v", f, pe) - return - } - if e != first { - t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first) - return - } - - // Test ForeachChild - pe.ForeachChild(func(baseName string, e *TOCEntry) bool { - if baseName == filepath.Base(filepath.Clean(f)) { - if e != first { - t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first) - return false - } - } - return true - }) - } - }) -} - -func viewContent(c []byte) string { - if len(c) < 100 { - return string(c) - } - return string(c[:50]) + "...(omit)..." + string(c[50:100]) -} - -func tarOf(s ...tarEntry) []tarEntry { return s } - -type tarEntry interface { - appendTar(tw *tar.Writer, prefix string, format tar.Format) error -} - -type tarEntryFunc func(*tar.Writer, string, tar.Format) error - -func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error { - return f(tw, prefix, format) -} - -func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { - format := tar.FormatUnknown - for _, opt := range opts { - switch v := opt.(type) { - case tar.Format: - format = v - default: - panic(fmt.Errorf("unsupported opt for buildTar: %v", opt)) - } - } - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, ent := range ents { - if err := ent.appendTar(tw, prefix, format); err != nil { - t.Fatalf("building input tar: %v", err) - } - } - if err := tw.Close(); err != nil { - t.Errorf("closing write of input tar: %v", err) - } - data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to see lossless works - return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data))) -} - -func dir(name string, opts ...interface{}) tarEntry { - return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { - var o owner - mode := os.FileMode(0755) - for _, opt := range opts { - switch v := opt.(type) { - case owner: - o = v - case os.FileMode: - mode = v - default: - return errors.New("unsupported opt") - } - } - if !strings.HasSuffix(name, "/") { - panic(fmt.Sprintf("missing trailing slash in dir %q ", name)) - } - tm, err := fileModeToTarMode(mode) - if err != nil { - return err - } - return tw.WriteHeader(&tar.Header{ - Typeflag: tar.TypeDir, - Name: prefix + name, - Mode: tm, - Uid: o.uid, - Gid: o.gid, - Format: format, - }) - }) -} - -// xAttr are extended attributes to set on test files created with the file func. -type xAttr map[string]string - -// owner is owner ot set on test files and directories with the file and dir functions. 
-type owner struct { - uid int - gid int -} - -func file(name, contents string, opts ...interface{}) tarEntry { - return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { - var xattrs xAttr - var o owner - mode := os.FileMode(0644) - for _, opt := range opts { - switch v := opt.(type) { - case xAttr: - xattrs = v - case owner: - o = v - case os.FileMode: - mode = v - default: - return errors.New("unsupported opt") - } - } - if strings.HasSuffix(name, "/") { - return fmt.Errorf("bogus trailing slash in file %q", name) - } - tm, err := fileModeToTarMode(mode) - if err != nil { - return err - } - if len(xattrs) > 0 { - format = tar.FormatPAX // only PAX supports xattrs - } - if err := tw.WriteHeader(&tar.Header{ - Typeflag: tar.TypeReg, - Name: prefix + name, - Mode: tm, - Xattrs: xattrs, - Size: int64(len(contents)), - Uid: o.uid, - Gid: o.gid, - Format: format, - }); err != nil { - return err - } - _, err = io.WriteString(tw, contents) - return err - }) -} - -func symlink(name, target string) tarEntry { - return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { - return tw.WriteHeader(&tar.Header{ - Typeflag: tar.TypeSymlink, - Name: prefix + name, - Linkname: target, - Mode: 0644, - Format: format, - }) - }) -} - -func link(name string, linkname string) tarEntry { - now := time.Now() - return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { - return w.WriteHeader(&tar.Header{ - Typeflag: tar.TypeLink, - Name: prefix + name, - Linkname: linkname, - ModTime: now, - Format: format, - }) - }) -} - -func chardev(name string, major, minor int64) tarEntry { - now := time.Now() - return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { - return w.WriteHeader(&tar.Header{ - Typeflag: tar.TypeChar, - Name: prefix + name, - Devmajor: major, - Devminor: minor, - ModTime: now, - Format: format, - }) - }) -} - -func blockdev(name string, major, minor int64) tarEntry { - now := time.Now() - return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { - return w.WriteHeader(&tar.Header{ - Typeflag: tar.TypeBlock, - Name: prefix + name, - Devmajor: major, - Devminor: minor, - ModTime: now, - Format: format, - }) - }) -} -func fifo(name string) tarEntry { - now := time.Now() - return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { - return w.WriteHeader(&tar.Header{ - Typeflag: tar.TypeFifo, - Name: prefix + name, - ModTime: now, - Format: format, - }) - }) -} - -func prefetchLandmark() tarEntry { - return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { - if err := w.WriteHeader(&tar.Header{ - Name: PrefetchLandmark, - Typeflag: tar.TypeReg, - Size: int64(len([]byte{landmarkContents})), - Format: format, - }); err != nil { - return err - } - contents := []byte{landmarkContents} - if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { - return err - } - return nil - }) -} - -func noPrefetchLandmark() tarEntry { - return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { - if err := w.WriteHeader(&tar.Header{ - Name: NoPrefetchLandmark, - Typeflag: tar.TypeReg, - Size: int64(len([]byte{landmarkContents})), - Format: format, - }); err != nil { - return err - } - contents := []byte{landmarkContents} - if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { - return err - } - return nil - }) -} - -func regDigest(t *testing.T, name string, contentStr 
string, digestMap map[string]digest.Digest) tarEntry { - if digestMap == nil { - t.Fatalf("digest map mustn't be nil") - } - content := []byte(contentStr) - - var n int64 - for n < int64(len(content)) { - size := int64(chunkSize) - remain := int64(len(content)) - n - if remain < size { - size = remain - } - dgstr := digest.Canonical.Digester() - if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil { - t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)", - string(content[n:n+size]), name, n, size) - } - digestMap[chunkID(name, n, size)] = dgstr.Digest() - n += size - } - - return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { - if err := w.WriteHeader(&tar.Header{ - Typeflag: tar.TypeReg, - Name: prefix + name, - Size: int64(len(content)), - Format: format, - }); err != nil { - return err - } - if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil { - return err - } - return nil - }) -} - -var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - -func randomContents(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = runes[rand.Intn(len(runes))] - } - return string(b) -} - -func fileModeToTarMode(mode os.FileMode) (int64, error) { - h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") - if err != nil { - return 0, err - } - return h.Mode, nil -} - -// fileInfoOnlyMode is os.FileMode that populates only file mode. -type fileInfoOnlyMode os.FileMode - -func (f fileInfoOnlyMode) Name() string { return "" } -func (f fileInfoOnlyMode) Size() int64 { return 0 } -func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } -func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } -func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } -func (f fileInfoOnlyMode) Sys() interface{} { return nil } - -func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { - if len(streams) == 0 { - return // nop - } - - wants := map[int64]struct{}{} - for _, s := range streams { - wants[s] = struct{}{} - } - - len0 := len(b) - br := bytes.NewReader(b) - zr := new(gzip.Reader) - t.Logf("got gzip streams:") - numStreams := 0 - for { - zoff := len0 - br.Len() - if err := zr.Reset(br); err != nil { - if err == io.EOF { - return - } - t.Fatalf("countStreams(gzip), Reset: %v", err) - } - zr.Multistream(false) - n, err := io.Copy(io.Discard, zr) - if err != nil { - t.Fatalf("countStreams(gzip), Copy: %v", err) - } - var extra string - if len(zr.Header.Extra) > 0 { - extra = fmt.Sprintf("; extra=%q", zr.Header.Extra) - } - t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra) - delete(wants, int64(zoff)) - numStreams++ - } -} - -func GzipDiffIDOf(t *testing.T, b []byte) string { - h := sha256.New() - zr, err := gzip.NewReader(bytes.NewReader(b)) - if err != nil { - t.Fatalf("diffIDOf(gzip): %v", err) - } - defer zr.Close() - if _, err := io.Copy(h, zr); err != nil { - t.Fatalf("diffIDOf(gzip).Copy: %v", err) - } - return fmt.Sprintf("sha256:%x", h.Sum(nil)) -} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go deleted file mode 100644 index 57e0aa614e..0000000000 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go +++ /dev/null @@ -1,342 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - Copyright 2019 The Go Authors. All rights reserved. - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. -*/ - -package estargz - -import ( - "archive/tar" - "hash" - "io" - "os" - "path" - "time" - - digest "github.com/opencontainers/go-digest" -) - -const ( - // TOCTarName is the name of the JSON file in the tar archive in the - // table of contents gzip stream. - TOCTarName = "stargz.index.json" - - // FooterSize is the number of bytes in the footer - // - // The footer is an empty gzip stream with no compression and an Extra - // header of the form "%016xSTARGZ", where the 64 bit hex-encoded - // number is the offset to the gzip stream of JSON TOC. - // - // 51 comes from: - // - // 10 bytes gzip header - // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) - // 2 bytes Extra: SI1 = 'S', SI2 = 'G' - // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) - // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) - // 5 bytes flate header - // 8 bytes gzip footer - // (End of the eStargz blob) - // - // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz. - FooterSize = 51 - - // legacyFooterSize is the number of bytes in the legacy stargz footer. - // - // 47 comes from: - // - // 10 byte gzip header + - // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" + - // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset)) - // 5 byte flate header - // 8 byte gzip footer (two little endian uint32s: digest, size) - legacyFooterSize = 47 - - // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the - // digest of the TOC JSON. - // This annotation is valid only when it is specified in `.[]layers.annotations` - // of an image manifest. - TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" - - // StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy - // pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size - // to the runtime but current OCI image doesn't ship this information by default. So we store this - // to the special annotation. - StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size" - - // PrefetchLandmark is a file entry which indicates the end position of - // prefetch in the stargz file. - PrefetchLandmark = ".prefetch.landmark" - - // NoPrefetchLandmark is a file entry which indicates that no prefetch should - // occur in the stargz file. - NoPrefetchLandmark = ".no.prefetch.landmark" - - landmarkContents = 0xf -) - -// JTOC is the JSON-serialized table of contents index of the files in the stargz file. -type JTOC struct { - Version int `json:"version"` - Entries []*TOCEntry `json:"entries"` -} - -// TOCEntry is an entry in the stargz file's TOC (Table of Contents). -type TOCEntry struct { - // Name is the tar entry's name. 
It is the complete path
-    // stored in the tar file, not just the base name.
-    Name string `json:"name"`
-
-    // Type is one of "dir", "reg", "symlink", "hardlink", "char",
-    // "block", "fifo", or "chunk".
-    // The "chunk" type is used for regular file data chunks past the first
-    // TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
-    // ChunkOffset, and ChunkSize populated.
-    Type string `json:"type"`
-
-    // Size, for regular files, is the logical size of the file.
-    Size int64 `json:"size,omitempty"`
-
-    // ModTime3339 is the modification time of the tar entry. Empty
-    // means zero or unknown. Otherwise it's in UTC RFC3339
-    // format. Use the ModTime method to access the time.Time value.
-    ModTime3339 string `json:"modtime,omitempty"`
-    modTime     time.Time
-
-    // LinkName, for symlinks and hardlinks, is the link target.
-    LinkName string `json:"linkName,omitempty"`
-
-    // Mode is the permission and mode bits.
-    Mode int64 `json:"mode,omitempty"`
-
-    // UID is the user ID of the owner.
-    UID int `json:"uid,omitempty"`
-
-    // GID is the group ID of the owner.
-    GID int `json:"gid,omitempty"`
-
-    // Uname is the username of the owner.
-    //
-    // In the serialized JSON, this field may only be present for
-    // the first entry with the same UID.
-    Uname string `json:"userName,omitempty"`
-
-    // Gname is the group name of the owner.
-    //
-    // In the serialized JSON, this field may only be present for
-    // the first entry with the same GID.
-    Gname string `json:"groupName,omitempty"`
-
-    // Offset, for regular files, provides the offset in the
-    // stargz file to the file's data bytes. See ChunkOffset and
-    // ChunkSize.
-    Offset int64 `json:"offset,omitempty"`
-
-    // InnerOffset is an optional field that indicates the uncompressed offset
-    // of this "reg" or "chunk" payload within the stream that starts at Offset.
-    // This field makes it possible to put multiple "reg" or "chunk" payloads
-    // in one chunk, sharing the same Offset but with different InnerOffset values.
-    InnerOffset int64 `json:"innerOffset,omitempty"`
-
-    nextOffset int64 // the Offset of the next entry with a non-zero Offset
-
-    // DevMajor is the major device number for "char" and "block" types.
-    DevMajor int `json:"devMajor,omitempty"`
-
-    // DevMinor is the minor device number for "char" and "block" types.
-    DevMinor int `json:"devMinor,omitempty"`
-
-    // NumLink is the number of entry names pointing to this entry.
-    // Zero means one name references this entry.
-    // This field is calculated during runtime and not recorded in TOC JSON.
-    NumLink int `json:"-"`
-
-    // Xattrs are the extended attributes for the entry.
-    Xattrs map[string][]byte `json:"xattrs,omitempty"`
-
-    // Digest stores the OCI checksum for regular file payloads.
-    // It has the form "sha256:abcdef01234....".
-    Digest string `json:"digest,omitempty"`
-
-    // ChunkOffset is non-zero if this is a chunk of a large,
-    // regular file. If so, the Offset is where the gzip header of
-    // the ChunkSize bytes at ChunkOffset in Name begins.
-    //
-    // In serialized form, a "chunkSize" JSON field of zero means
-    // that the chunk goes to the end of the file. After reading
-    // from the stargz TOC, though, the ChunkSize is initialized
-    // to a non-zero value when Type is either "reg" or
-    // "chunk".
-    ChunkOffset int64 `json:"chunkOffset,omitempty"`
-    ChunkSize   int64 `json:"chunkSize,omitempty"`
-
-    // ChunkDigest stores an OCI digest of the chunk. This must be formed
-    // as "sha256:0123abcd...".
-    ChunkDigest string `json:"chunkDigest,omitempty"`
-
-    children map[string]*TOCEntry
-
-    // chunkTopIndex is the index of the entry where Offset starts in the blob.
-    chunkTopIndex int
-}
-
-// ModTime returns the entry's modification time.
-func (e *TOCEntry) ModTime() time.Time { return e.modTime }
-
-// NextOffset returns the position (relative to the start of the
-// stargz file) of the next gzip boundary after e.Offset.
-func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
-
-func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
-    if e.children == nil {
-        e.children = make(map[string]*TOCEntry)
-    }
-    if child.Type == "dir" {
-        e.NumLink++ // Entry ".." in the subdirectory links to this directory
-    }
-    e.children[baseName] = child
-}
-
-// isDataType reports whether TOCEntry is a regular file or chunk (something that
-// contains regular file data).
-func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
-
-// Stat returns a FileInfo value representing e.
-func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
-
-// ForeachChild calls f for each child item. If f returns false, iteration ends.
-// If e is not a directory, f is not called.
-func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
-    for name, ent := range e.children {
-        if !f(name, ent) {
-            return
-        }
-    }
-}
-
-// LookupChild returns the directory e's child by its base name.
-func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
-    child, ok = e.children[baseName]
-    return
-}
-
-// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
-type fileInfo struct{ e *TOCEntry }
-
-var _ os.FileInfo = fileInfo{}
-
-func (fi fileInfo) Name() string       { return path.Base(fi.e.Name) }
-func (fi fileInfo) IsDir() bool        { return fi.e.Type == "dir" }
-func (fi fileInfo) Size() int64        { return fi.e.Size }
-func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
-func (fi fileInfo) Sys() interface{}   { return fi.e }
-func (fi fileInfo) Mode() (m os.FileMode) {
-    // TOCEntry.Mode is tar.Header.Mode so we can interpret these bits using the `tar` pkg.
-    m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
-        (os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
-    switch fi.e.Type {
-    case "dir":
-        m |= os.ModeDir
-    case "symlink":
-        m |= os.ModeSymlink
-    case "char":
-        m |= os.ModeDevice | os.ModeCharDevice
-    case "block":
-        m |= os.ModeDevice
-    case "fifo":
-        m |= os.ModeNamedPipe
-    }
-    return m
-}
-
-// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
-// in an eStargz blob.
-type TOCEntryVerifier interface {
-
-    // Verifier provides a content verifier that can be used for verifying the
-    // contents of the specified TOCEntry.
-    Verifier(ce *TOCEntry) (digest.Verifier, error)
-}
-
-// Compression provides the compression helper to be used when creating and parsing eStargz.
-// This package provides gzip-based Compression by default, but any compression
-// algorithm (e.g. zstd) can be used as long as it implements Compression.
-type Compression interface {
-    Compressor
-    Decompressor
-}
-
-// Compressor represents the helper methods to be used for creating eStargz.
-type Compressor interface {
-    // Writer returns a WriteCloser to be used for writing a chunk to eStargz.
-    // Every time a chunk is written, the WriteCloser is closed and Writer is
-    // called again for writing the next chunk.
-    //
-    // The returned writer should implement a "Flush() error" method that flushes
-    // any pending compressed data to the underlying writer.
-    Writer(w io.Writer) (WriteFlushCloser, error)
-
-    // WriteTOCAndFooter is called to write JTOC to the passed Writer.
-    // diffHash calculates the DiffID (uncompressed sha256 hash) of the blob.
-    // WriteTOCAndFooter can optionally write anything that affects the DiffID
-    // calculation (e.g. the uncompressed TOC JSON) to diffHash.
-    //
-    // This function returns tocDgst that represents the digest of the TOC that will be used
-    // to verify this blob when it's parsed.
-    WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
-}
-
-// Decompressor represents the helper methods to be used for parsing eStargz.
-type Decompressor interface {
-    // Reader returns a ReadCloser to be used for decompressing the file payload.
-    Reader(r io.Reader) (io.ReadCloser, error)
-
-    // FooterSize returns the size of the footer of this blob.
-    FooterSize() int64
-
-    // ParseFooter parses the footer and returns the offset and (compressed) size of the TOC.
-    // blobPayloadSize is the (compressed) size of the blob payload (i.e. the size from
-    // the top of the blob until the TOC JSON).
-    //
-    // If tocOffset < 0, we assume that the TOC isn't contained in the blob and pass a nil reader
-    // to ParseTOC; ParseTOC is then expected to acquire the TOC from an external location and return it.
-    //
-    // tocSize is optional. If tocSize <= 0, it defaults to the size of the range from tocOffset until the beginning of the
-    // footer (blob size - tocOffset - FooterSize).
-    // If blobPayloadSize < 0, blobPayloadSize becomes the blob size.
-    ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
-
-    // ParseTOC parses the TOC from the passed reader. The reader provides the partial contents
-    // of the underlying blob in the range specified by the ParseFooter method.
-    //
-    // This function returns tocDgst that represents the digest of the TOC that will be used
-    // to verify this blob. This must match the value returned from
-    // Compressor.WriteTOCAndFooter that is used when creating this blob.
-    //
-    // If the tocOffset returned by ParseFooter is < 0, we assume that the TOC isn't contained in the blob.
-    // A nil reader is passed to ParseTOC, which is then expected to acquire the TOC from an external
-    // location and return it.
-    ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
-}
-
-type WriteFlushCloser interface {
-    io.WriteCloser
-    Flush() error
-}
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
deleted file mode 100644
index 1cade6cef6..0000000000
--- a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Brian Goff
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go deleted file mode 100644 index 42bf32aab0..0000000000 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +++ /dev/null @@ -1,16 +0,0 @@ -package md2man - -import ( - "github.com/russross/blackfriday/v2" -) - -// Render converts a markdown document into a roff formatted document. -func Render(doc []byte) []byte { - renderer := NewRoffRenderer() - - return blackfriday.Run(doc, - []blackfriday.Option{ - blackfriday.WithRenderer(renderer), - blackfriday.WithExtensions(renderer.GetExtensions()), - }...) -} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go deleted file mode 100644 index 4b19188d90..0000000000 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ /dev/null @@ -1,348 +0,0 @@ -package md2man - -import ( - "bytes" - "fmt" - "io" - "os" - "strings" - - "github.com/russross/blackfriday/v2" -) - -// roffRenderer implements the blackfriday.Renderer interface for creating -// roff format (manpages) from markdown text -type roffRenderer struct { - extensions blackfriday.Extensions - listCounters []int - firstHeader bool - firstDD bool - listDepth int -} - -const ( - titleHeader = ".TH " - topLevelHeader = "\n\n.SH " - secondLevelHdr = "\n.SH " - otherHeader = "\n.SS " - crTag = "\n" - emphTag = "\\fI" - emphCloseTag = "\\fP" - strongTag = "\\fB" - strongCloseTag = "\\fP" - breakTag = "\n.br\n" - paraTag = "\n.PP\n" - hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" - linkTag = "\n\\[la]" - linkCloseTag = "\\[ra]" - codespanTag = "\\fB" - codespanCloseTag = "\\fR" - codeTag = "\n.EX\n" - codeCloseTag = "\n.EE\n" - quoteTag = "\n.PP\n.RS\n" - quoteCloseTag = "\n.RE\n" - listTag = "\n.RS\n" - listCloseTag = "\n.RE\n" - dtTag = "\n.TP\n" - dd2Tag = "\n" - tableStart = "\n.TS\nallbox;\n" - tableEnd = ".TE\n" - tableCellStart = "T{\n" - tableCellEnd = "\nT}\n" -) - -// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents -// from markdown -func NewRoffRenderer() *roffRenderer { // nolint: golint - var extensions blackfriday.Extensions - - extensions |= blackfriday.NoIntraEmphasis - extensions |= blackfriday.Tables - extensions |= blackfriday.FencedCode - extensions |= blackfriday.SpaceHeadings - extensions |= blackfriday.Footnotes - extensions |= blackfriday.Titleblock - extensions |= blackfriday.DefinitionLists - return &roffRenderer{ - extensions: extensions, - } -} - -// GetExtensions returns the list of extensions used by this renderer implementation -func (r *roffRenderer) GetExtensions() blackfriday.Extensions { - return r.extensions -} - -// RenderHeader handles outputting the header at document start -func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { - // disable hyphenation - out(w, ".nh\n") -} - -// RenderFooter handles outputting the footer at the document end; the roff -// renderer has no footer 
information -func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { -} - -// RenderNode is called for each node in a markdown document; based on the node -// type the equivalent roff output is sent to the writer -func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { - walkAction := blackfriday.GoToNext - - switch node.Type { - case blackfriday.Text: - escapeSpecialChars(w, node.Literal) - case blackfriday.Softbreak: - out(w, crTag) - case blackfriday.Hardbreak: - out(w, breakTag) - case blackfriday.Emph: - if entering { - out(w, emphTag) - } else { - out(w, emphCloseTag) - } - case blackfriday.Strong: - if entering { - out(w, strongTag) - } else { - out(w, strongCloseTag) - } - case blackfriday.Link: - // Don't render the link text for automatic links, because this - // will only duplicate the URL in the roff output. - // See https://daringfireball.net/projects/markdown/syntax#autolink - if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) { - out(w, string(node.FirstChild.Literal)) - } - // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page. - escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-") - out(w, linkTag+escapedLink+linkCloseTag) - walkAction = blackfriday.SkipChildren - case blackfriday.Image: - // ignore images - walkAction = blackfriday.SkipChildren - case blackfriday.Code: - out(w, codespanTag) - escapeSpecialChars(w, node.Literal) - out(w, codespanCloseTag) - case blackfriday.Document: - break - case blackfriday.Paragraph: - // roff .PP markers break lists - if r.listDepth > 0 { - return blackfriday.GoToNext - } - if entering { - out(w, paraTag) - } else { - out(w, crTag) - } - case blackfriday.BlockQuote: - if entering { - out(w, quoteTag) - } else { - out(w, quoteCloseTag) - } - case blackfriday.Heading: - r.handleHeading(w, node, entering) - case blackfriday.HorizontalRule: - out(w, hruleTag) - case blackfriday.List: - r.handleList(w, node, entering) - case blackfriday.Item: - r.handleItem(w, node, entering) - case blackfriday.CodeBlock: - out(w, codeTag) - escapeSpecialChars(w, node.Literal) - out(w, codeCloseTag) - case blackfriday.Table: - r.handleTable(w, node, entering) - case blackfriday.TableHead: - case blackfriday.TableBody: - case blackfriday.TableRow: - // no action as cell entries do all the nroff formatting - return blackfriday.GoToNext - case blackfriday.TableCell: - r.handleTableCell(w, node, entering) - case blackfriday.HTMLSpan: - // ignore other HTML tags - case blackfriday.HTMLBlock: - if bytes.HasPrefix(node.Literal, []byte(" - -## Using Multiple `Keychain`s - -[`NewMultiKeychain`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/authn#NewMultiKeychain) allows you to specify multiple `Keychain` implementations, which will be checked in order when credentials are needed. - -For example: - -```go -kc := authn.NewMultiKeychain( - authn.DefaultKeychain, - google.Keychain, - authn.NewKeychainFromHelper(ecr.ECRHelper{ClientFactory: api.DefaultClientFactory{}}), - authn.NewKeychainFromHelper(acr.ACRCredHelper{}), -) -``` - -This multi-keychain will: - -- first check for credentials found in the Docker config file, as describe above, then -- check for GCP credentials available in the environment, as described above, then -- check for ECR credentials by emulating the ECR credential helper, then -- check for ACR credentials by emulating the ACR credential helper. 
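For a sense of how such a composed keychain is consumed, here is a minimal sketch; it assumes only the `authn`, `name`, and `remote` packages from this repository, and the image reference is hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Keychains are consulted in order; the first non-Anonymous
	// result wins.
	kc := authn.NewMultiKeychain(
		authn.DefaultKeychain, // the Docker config file, as described above
	)

	// Hypothetical image reference; replace with a real one.
	ref, err := name.ParseReference("registry.example.com/my/image:latest")
	if err != nil {
		log.Fatal(err)
	}

	// The remote package resolves credentials via the keychain when it
	// first contacts the registry.
	img, err := remote.Image(ref, remote.WithAuthFromKeychain(kc))
	if err != nil {
		log.Fatal(err)
	}

	digest, err := img.Digest()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(digest)
}
```

Because resolution happens lazily, per registry, composing many keychains costs nothing until credentials are actually needed.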
- -If any keychain implementation is able to provide credentials for the request, they will be used, and further keychain implementations will not be consulted. - -If no implementations are able to provide credentials, `Anonymous` credentials will be used. - -## Docker Config Auth - -What follows attempts to gather useful information about Docker's config.json and make it available in one place. - -If you have questions, please [file an issue](https://github.com/google/go-containerregistry/issues/new). - -### Plaintext - -The config file is where your credentials are stored when you invoke `docker login`, e.g. the contents may look something like this: - -```json -{ - "auths": { - "registry.example.com": { - "auth": "QXp1cmVEaWFtb25kOmh1bnRlcjI=" - } - } -} -``` - -The `auths` map has an entry per registry, and the `auth` field contains your username and password encoded as [HTTP 'Basic' Auth](https://tools.ietf.org/html/rfc7617). - -**NOTE**: This means that your credentials are stored _in plaintext_: - -```bash -$ echo "QXp1cmVEaWFtb25kOmh1bnRlcjI=" | base64 -d -AzureDiamond:hunter2 -``` - -For what it's worth, this config file is equivalent to: - -```json -{ - "auths": { - "registry.example.com": { - "username": "AzureDiamond", - "password": "hunter2" - } - } -} -``` - -... which is useful to know if e.g. your CI system provides you a registry username and password via environment variables and you want to populate this file manually without invoking `docker login`. - -### Helpers - -If you log in like this, `docker` will warn you that you should use a [credential helper](https://docs.docker.com/engine/reference/commandline/login/#credentials-store), and you should! - -To configure a global credential helper: -```json -{ - "credsStore": "osxkeychain" -} -``` - -To configure a per-registry credential helper: -```json -{ - "credHelpers": { - "gcr.io": "gcr" - } -} -``` - -We use [`github.com/docker/cli/cli/config.Load`](https://godoc.org/github.com/docker/cli/cli/config#Load) to parse the config file and invoke any necessary credential helpers. This handles the logic of taking a [`ConfigFile`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/configfile/file.go#L25-L54) + registry domain and producing an [`AuthConfig`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L3-L22), which determines how we authenticate to the registry. - -## Credential Helpers - -The [credential helper protocol](https://github.com/docker/docker-credential-helpers) allows you to configure a binary that supplies credentials for the registry, rather than hard-coding them in the config file. - -The protocol has several verbs, but the one we most care about is `get`. - -For example, using the following config file: -```json -{ - "credHelpers": { - "gcr.io": "gcr", - "eu.gcr.io": "gcr" - } -} -``` - -To acquire credentials for `gcr.io`, we look in the `credHelpers` map to find -the credential helper for `gcr.io` is `gcr`. By appending that value to -`docker-credential-`, we can get the name of the binary we need to use. - -For this example, that's `docker-credential-gcr`, which must be on our `$PATH`. -We'll then invoke that binary to get credentials: - -```bash -$ echo "gcr.io" | docker-credential-gcr get -{"Username":"_token","Secret":""} -``` - -You can configure the same credential helper for multiple registries, which is -why we need to pass the domain in via STDIN, e.g. 
if we were trying to access
-`eu.gcr.io`, we'd do this instead:
-
-```bash
-$ echo "eu.gcr.io" | docker-credential-gcr get
-{"Username":"_token","Secret":""}
-```
-
-### Debugging credential helpers
-
-If a credential helper is configured but doesn't seem to be working, it can be
-challenging to debug. Implementing a fake credential helper lets you poke around
-and makes it easier to see where the failure is happening.
-
-This "implements" a credential helper with hard-coded values:
-```
-#!/usr/bin/env bash
-echo '{"Username":"","Secret":"hunter2"}'
-```
-
-This implements a credential helper that prints the output of
-`docker-credential-gcr` to both stderr and whatever called it, which allows you
-to snoop on another credential helper:
-```
-#!/usr/bin/env bash
-docker-credential-gcr $@ | tee >(cat 1>&2)
-```
-
-Put those files somewhere on your path, naming them e.g.
-`docker-credential-hardcoded` and `docker-credential-tee`, then modify the
-config file to use them:
-
-```json
-{
-  "credHelpers": {
-    "gcr.io": "tee",
-    "eu.gcr.io": "hardcoded"
-  }
-}
-```
-
-The `docker-credential-tee` trick works with both `crane` and `docker`:
-
-```bash
-$ crane manifest gcr.io/google-containers/pause > /dev/null
-{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":""}
-
-$ docker pull gcr.io/google-containers/pause
-Using default tag: latest
-{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":""}
-latest: Pulling from google-containers/pause
-a3ed95caeb02: Pull complete
-4964c72cd024: Pull complete
-Digest: sha256:a78c2d6208eff9b672de43f880093100050983047b7b0afe0217d3656e1b0d5f
-Status: Downloaded newer image for gcr.io/google-containers/pause:latest
-gcr.io/google-containers/pause:latest
-```
-
-## The Registry
-
-There are two methods for authenticating against a registry:
-[token](https://docs.docker.com/registry/spec/auth/token/) and
-[oauth2](https://docs.docker.com/registry/spec/auth/oauth/).
-
-Both methods are used to acquire an opaque `Bearer` token (or
-[RegistryToken](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L21))
-to use in the `Authorization` header. The registry will return a `401
-Unauthorized` during the [version
-check](https://github.com/opencontainers/distribution-spec/blob/2c3975d1f03b67c9a0203199038adea0413f0573/spec.md#api-version-check)
-(or during normal operations) with a
-[Www-Authenticate](https://tools.ietf.org/html/rfc7235#section-4.1) challenge
-indicating how to proceed.
-
-### Token
-
-If we get back an `AuthConfig` containing a [`Username/Password`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L5-L6)
-or
-[`Auth`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L7),
-we'll use the token method for authentication:
-
-![basic](../../images/credhelper-basic.svg)
-
-### OAuth 2
-
-If we get back an `AuthConfig` containing an [`IdentityToken`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L18)
-we'll use the oauth2 method for authentication:
-
-![oauth](../../images/credhelper-oauth.svg)
-
-This happens when a credential helper returns a response with the
-[`Username`](https://github.com/docker/docker-credential-helpers/blob/f78081d1f7fef6ad74ad6b79368de6348386e591/credentials/credentials.go#L16)
-set to `<token>` (no, that's not a placeholder, the literal string `"<token>"`).
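As a minimal sketch of the mapping this implies, assuming only the `authn` package from this repository (the helper outputs below are made up; the `<token>` check mirrors the `NewKeychainFromHelper` wrapper vendored later in this diff):

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/authn"
)

// toAuthConfig maps a credential helper's (username, secret) pair to an
// AuthConfig, treating the literal username "<token>" as a signal that the
// secret is an identity token rather than a password.
func toAuthConfig(username, secret string) authn.AuthConfig {
	if username == "<token>" {
		return authn.AuthConfig{IdentityToken: secret}
	}
	return authn.AuthConfig{Username: username, Password: secret}
}

func main() {
	// Hypothetical helper responses.
	fmt.Printf("%+v\n", toAuthConfig("AzureDiamond", "hunter2")) // basic auth
	fmt.Printf("%+v\n", toAuthConfig("<token>", "eyJhbGciOi..")) // identity token
}
```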
-Why the credential helper protocol uses the literal string `"<token>"` here is
-unclear: [moby/moby#36926](https://github.com/moby/moby/issues/36926).
-
-We only support the oauth2 `grant_type` for `refresh_token` ([#629](https://github.com/google/go-containerregistry/issues/629)),
-since it's impossible to determine from the registry response whether we should
-use oauth, and the token method for authentication is widely implemented by
-registries.
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go b/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go
deleted file mode 100644
index 83214957d5..0000000000
--- a/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 Google LLC All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package authn
-
-// anonymous implements Authenticator for anonymous authentication.
-type anonymous struct{}
-
-// Authorization implements Authenticator.
-func (a *anonymous) Authorization() (*AuthConfig, error) {
-    return &AuthConfig{}, nil
-}
-
-// Anonymous is a singleton Authenticator for providing anonymous auth.
-var Anonymous Authenticator = &anonymous{}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go b/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go
deleted file mode 100644
index 0111f1ae72..0000000000
--- a/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 Google LLC All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package authn
-
-// auth is an Authenticator that simply returns the wrapped AuthConfig.
-type auth struct {
-    config AuthConfig
-}
-
-// FromConfig returns an Authenticator that just returns the given AuthConfig.
-func FromConfig(cfg AuthConfig) Authenticator {
-    return &auth{cfg}
-}
-
-// Authorization implements Authenticator.
-func (a *auth) Authorization() (*AuthConfig, error) {
-    return &a.config, nil
-}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go b/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go
deleted file mode 100644
index 172d218e4f..0000000000
--- a/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2018 Google LLC All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package authn
-
-import (
-    "encoding/base64"
-    "encoding/json"
-    "fmt"
-    "strings"
-)
-
-// Authenticator is used to authenticate Docker transports.
-type Authenticator interface {
-    // Authorization returns the value to use in an http transport's Authorization header.
-    Authorization() (*AuthConfig, error)
-}
-
-// AuthConfig contains authorization information for connecting to a Registry.
-// Inlined what we use from github.com/docker/cli/cli/config/types.
-type AuthConfig struct {
-    Username string `json:"username,omitempty"`
-    Password string `json:"password,omitempty"`
-    Auth     string `json:"auth,omitempty"`
-
-    // IdentityToken is used to authenticate the user and get
-    // an access token for the registry.
-    IdentityToken string `json:"identitytoken,omitempty"`
-
-    // RegistryToken is a bearer token to be sent to a registry
-    RegistryToken string `json:"registrytoken,omitempty"`
-}
-
-// This is effectively a copy of the type AuthConfig. This simplifies
-// JSON unmarshalling since AuthConfig methods are not inherited.
-type authConfig AuthConfig
-
-// UnmarshalJSON implements json.Unmarshaler
-func (a *AuthConfig) UnmarshalJSON(data []byte) error {
-    var shadow authConfig
-    err := json.Unmarshal(data, &shadow)
-    if err != nil {
-        return err
-    }
-
-    *a = (AuthConfig)(shadow)
-
-    if len(shadow.Auth) != 0 {
-        var derr error
-        a.Username, a.Password, derr = decodeDockerConfigFieldAuth(shadow.Auth)
-        if derr != nil {
-            err = fmt.Errorf("unable to decode auth field: %w", derr)
-        }
-    } else if len(a.Username) != 0 && len(a.Password) != 0 {
-        a.Auth = encodeDockerConfigFieldAuth(shadow.Username, shadow.Password)
-    }
-
-    return err
-}
-
-// MarshalJSON implements json.Marshaler
-func (a AuthConfig) MarshalJSON() ([]byte, error) {
-    shadow := (authConfig)(a)
-    shadow.Auth = encodeDockerConfigFieldAuth(shadow.Username, shadow.Password)
-    return json.Marshal(shadow)
-}
-
-// decodeDockerConfigFieldAuth deserializes the "auth" field from dockercfg into a
-// username and a password. The format of the auth field is base64(<username>:<password>).
-//
-// From https://github.com/kubernetes/kubernetes/blob/75e49ec824b183288e1dbaccfd7dbe77d89db381/pkg/credentialprovider/config.go
-// Copyright 2014 The Kubernetes Authors.
-// SPDX-License-Identifier: Apache-2.0 -func decodeDockerConfigFieldAuth(field string) (username, password string, err error) { - var decoded []byte - // StdEncoding can only decode padded string - // RawStdEncoding can only decode unpadded string - if strings.HasSuffix(strings.TrimSpace(field), "=") { - // decode padded data - decoded, err = base64.StdEncoding.DecodeString(field) - } else { - // decode unpadded data - decoded, err = base64.RawStdEncoding.DecodeString(field) - } - - if err != nil { - return - } - - parts := strings.SplitN(string(decoded), ":", 2) - if len(parts) != 2 { - err = fmt.Errorf("must be formatted as base64(username:password)") - return - } - - username = parts[0] - password = parts[1] - - return -} - -func encodeDockerConfigFieldAuth(username, password string) string { - return base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go b/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go deleted file mode 100644 index 500cb6616f..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -// Basic implements Authenticator for basic authentication. -type Basic struct { - Username string - Password string -} - -// Authorization implements Authenticator. -func (b *Basic) Authorization() (*AuthConfig, error) { - return &AuthConfig{ - Username: b.Username, - Password: b.Password, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go deleted file mode 100644 index 4cf86df92f..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -// Bearer implements Authenticator for bearer authentication. -type Bearer struct { - Token string `json:"token"` -} - -// Authorization implements Authenticator. 
-func (b *Bearer) Authorization() (*AuthConfig, error) { - return &AuthConfig{ - RegistryToken: b.Token, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go b/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go deleted file mode 100644 index c2a5fc0267..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package authn defines different methods of authentication for -// talking to a container registry. -package authn diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/github/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/github/keychain.go deleted file mode 100644 index 97ad34e616..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/github/keychain.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2022 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package github provides a keychain for the GitHub Container Registry. -package github - -import ( - "net/url" - "os" - - "github.com/google/go-containerregistry/pkg/authn" -) - -const ghcrHostname = "ghcr.io" - -// Keychain exports an instance of the GitHub Keychain. -// -// This keychain matches on requests for ghcr.io and provides the value of the -// environment variable $GITHUB_TOKEN, if it's set. 
-var Keychain authn.Keychain = githubKeychain{} - -type githubKeychain struct{} - -func (githubKeychain) Resolve(r authn.Resource) (authn.Authenticator, error) { - serverURL, err := url.Parse("https://" + r.String()) - if err != nil { - return authn.Anonymous, nil - } - if serverURL.Hostname() == ghcrHostname { - username := os.Getenv("GITHUB_ACTOR") - if username == "" { - username = "unset" - } - if tok := os.Getenv("GITHUB_TOKEN"); tok != "" { - return githubAuthenticator{username, tok}, nil - } - } - return authn.Anonymous, nil -} - -type githubAuthenticator struct{ username, password string } - -func (g githubAuthenticator) Authorization() (*authn.AuthConfig, error) { - return &authn.AuthConfig{ - Username: g.username, - Password: g.password, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go deleted file mode 100644 index 99e0b81c8d..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -import ( - "os" - "path/filepath" - "sync" - "time" - - "github.com/docker/cli/cli/config" - "github.com/docker/cli/cli/config/configfile" - "github.com/docker/cli/cli/config/types" - "github.com/google/go-containerregistry/pkg/name" - "github.com/mitchellh/go-homedir" -) - -// Resource represents a registry or repository that can be authenticated against. -type Resource interface { - // String returns the full string representation of the target, e.g. - // gcr.io/my-project or just gcr.io. - String() string - - // RegistryStr returns just the registry portion of the target, e.g. for - // gcr.io/my-project, this should just return gcr.io. This is needed to - // pull out an appropriate hostname. - RegistryStr() string -} - -// Keychain is an interface for resolving an image reference to a credential. -type Keychain interface { - // Resolve looks up the most appropriate credential for the specified target. - Resolve(Resource) (Authenticator, error) -} - -// defaultKeychain implements Keychain with the semantics of the standard Docker -// credential keychain. -type defaultKeychain struct { - mu sync.Mutex -} - -var ( - // DefaultKeychain implements Keychain by interpreting the docker config file. - DefaultKeychain = &defaultKeychain{} -) - -const ( - // DefaultAuthKey is the key used for dockerhub in config files, which - // is hardcoded for historical reasons. - DefaultAuthKey = "https://" + name.DefaultRegistry + "/v1/" -) - -// Resolve implements Keychain. -func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) { - dk.mu.Lock() - defer dk.mu.Unlock() - - // Podman users may have their container registry auth configured in a - // different location, that Docker packages aren't aware of. 
-    // If the Docker config file isn't found, we'll fall back to looking where
-    // Podman configures it, and parse that as a Docker auth config instead.
-
-    // First, check $HOME/.docker/config.json
-    foundDockerConfig := false
-    home, err := homedir.Dir()
-    if err == nil {
-        foundDockerConfig = fileExists(filepath.Join(home, ".docker/config.json"))
-    }
-    // If $HOME/.docker/config.json isn't found, check $DOCKER_CONFIG (if set)
-    if !foundDockerConfig && os.Getenv("DOCKER_CONFIG") != "" {
-        foundDockerConfig = fileExists(filepath.Join(os.Getenv("DOCKER_CONFIG"), "config.json"))
-    }
-    // If either of those locations is found, load it using Docker's
-    // config.Load, which may fail if the config can't be parsed.
-    //
-    // If neither was found, look for Podman's auth at
-    // $XDG_RUNTIME_DIR/containers/auth.json and attempt to load it as a
-    // Docker config.
-    //
-    // If neither is found, fall back to Anonymous.
-    var cf *configfile.ConfigFile
-    if foundDockerConfig {
-        cf, err = config.Load(os.Getenv("DOCKER_CONFIG"))
-        if err != nil {
-            return nil, err
-        }
-    } else {
-        f, err := os.Open(filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "containers/auth.json"))
-        if err != nil {
-            return Anonymous, nil
-        }
-        defer f.Close()
-        cf, err = config.LoadFromReader(f)
-        if err != nil {
-            return nil, err
-        }
-    }
-
-    // See:
-    // https://github.com/google/ko/issues/90
-    // https://github.com/moby/moby/blob/fc01c2b481097a6057bec3cd1ab2d7b4488c50c4/registry/config.go#L397-L404
-    var cfg, empty types.AuthConfig
-    for _, key := range []string{
-        target.String(),
-        target.RegistryStr(),
-    } {
-        if key == name.DefaultRegistry {
-            key = DefaultAuthKey
-        }
-
-        cfg, err = cf.GetAuthConfig(key)
-        if err != nil {
-            return nil, err
-        }
-        // cf.GetAuthConfig automatically sets the ServerAddress attribute. Since
-        // we don't make use of it, clear the value for a proper "is-empty" test.
-        // See: https://github.com/google/go-containerregistry/issues/1510
-        cfg.ServerAddress = ""
-        if cfg != empty {
-            break
-        }
-    }
-    if cfg == empty {
-        return Anonymous, nil
-    }
-
-    return FromConfig(AuthConfig{
-        Username:      cfg.Username,
-        Password:      cfg.Password,
-        Auth:          cfg.Auth,
-        IdentityToken: cfg.IdentityToken,
-        RegistryToken: cfg.RegistryToken,
-    }), nil
-}
-
-// fileExists returns true if the given path exists and is not a directory.
-func fileExists(path string) bool {
-    fi, err := os.Stat(path)
-    return err == nil && !fi.IsDir()
-}
-
-// Helper is a subset of the Docker credential helper credentials.Helper
-// interface used by NewKeychainFromHelper.
-//
-// See:
-// https://pkg.go.dev/github.com/docker/docker-credential-helpers/credentials#Helper
-type Helper interface {
-    Get(serverURL string) (string, string, error)
-}
-
-// NewKeychainFromHelper returns a Keychain based on a Docker credential helper
-// implementation that can Get username and password credentials for a given
-// server URL.
-func NewKeychainFromHelper(h Helper) Keychain { return wrapper{h} }
-
-type wrapper struct{ h Helper }
-
-func (w wrapper) Resolve(r Resource) (Authenticator, error) {
-    u, p, err := w.h.Get(r.RegistryStr())
-    if err != nil {
-        return Anonymous, nil
-    }
-    // If the secret being stored is an identity token, the Username should be set to <token>.
-    // ref: https://docs.docker.com/engine/reference/commandline/login/#credential-helper-protocol
-    if u == "<token>" {
-        return FromConfig(AuthConfig{Username: u, IdentityToken: p}), nil
-    }
-    return FromConfig(AuthConfig{Username: u, Password: p}), nil
-}
-
-func RefreshingKeychain(inner Keychain, duration time.Duration) Keychain {
-    return &refreshingKeychain{
-        keychain: inner,
-        duration: duration,
-    }
-}
-
-type refreshingKeychain struct {
-    keychain Keychain
-    duration time.Duration
-    clock    func() time.Time
-}
-
-func (r *refreshingKeychain) Resolve(target Resource) (Authenticator, error) {
-    last := time.Now()
-    auth, err := r.keychain.Resolve(target)
-    if err != nil || auth == Anonymous {
-        return auth, err
-    }
-    return &refreshing{
-        target:   target,
-        keychain: r.keychain,
-        last:     last,
-        cached:   auth,
-        duration: r.duration,
-        clock:    r.clock,
-    }, nil
-}
-
-type refreshing struct {
-    sync.Mutex
-    target   Resource
-    keychain Keychain
-
-    duration time.Duration
-
-    last   time.Time
-    cached Authenticator
-
-    // for testing
-    clock func() time.Time
-}
-
-func (r *refreshing) Authorization() (*AuthConfig, error) {
-    r.Lock()
-    defer r.Unlock()
-    if r.cached == nil || r.expired() {
-        r.last = r.now()
-        auth, err := r.keychain.Resolve(r.target)
-        if err != nil {
-            return nil, err
-        }
-        r.cached = auth
-    }
-    return r.cached.Authorization()
-}
-
-func (r *refreshing) now() time.Time {
-    if r.clock == nil {
-        return time.Now()
-    }
-    return r.clock()
-}
-
-func (r *refreshing) expired() bool {
-    return r.now().Sub(r.last) > r.duration
-}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go
deleted file mode 100644
index 3b1804f5d0..0000000000
--- a/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 Google LLC All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package authn
-
-type multiKeychain struct {
-    keychains []Keychain
-}
-
-// Assert that our multi-keychain implements Keychain.
-var _ (Keychain) = (*multiKeychain)(nil)
-
-// NewMultiKeychain composes a list of keychains into one new keychain.
-func NewMultiKeychain(kcs ...Keychain) Keychain {
-    return &multiKeychain{keychains: kcs}
-}
-
-// Resolve implements Keychain.
-func (mk *multiKeychain) Resolve(target Resource) (Authenticator, error) {
-    for _, kc := range mk.keychains {
-        auth, err := kc.Resolve(target)
-        if err != nil {
-            return nil, err
-        }
-        if auth != Anonymous {
-            return auth, nil
-        }
-    }
-    return Anonymous, nil
-}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/compression/compression.go b/vendor/github.com/google/go-containerregistry/pkg/compression/compression.go
deleted file mode 100644
index 6686c2d8d9..0000000000
--- a/vendor/github.com/google/go-containerregistry/pkg/compression/compression.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2022 Google LLC All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package compression abstracts over gzip and zstd.
-package compression
-
-// Compression is an enumeration of the supported compression algorithms.
-type Compression string
-
-// The collection of known Compression values.
-const (
-    None Compression = "none"
-    GZip Compression = "gzip"
-    ZStd Compression = "zstd"
-)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/append.go b/vendor/github.com/google/go-containerregistry/pkg/crane/append.go
deleted file mode 100644
index f1c2ef69ae..0000000000
--- a/vendor/github.com/google/go-containerregistry/pkg/crane/append.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2018 Google LLC All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package crane
-
-import (
-    "fmt"
-    "os"
-
-    "github.com/google/go-containerregistry/internal/windows"
-    v1 "github.com/google/go-containerregistry/pkg/v1"
-    "github.com/google/go-containerregistry/pkg/v1/mutate"
-    "github.com/google/go-containerregistry/pkg/v1/stream"
-    "github.com/google/go-containerregistry/pkg/v1/tarball"
-    "github.com/google/go-containerregistry/pkg/v1/types"
-)
-
-func isWindows(img v1.Image) (bool, error) {
-    cfg, err := img.ConfigFile()
-    if err != nil {
-        return false, err
-    }
-    return cfg != nil && cfg.OS == "windows", nil
-}
-
-// Append reads a layer from path and appends it to the v1.Image base.
-//
-// If the base image is a Windows base image (i.e., its config.OS is
-// "windows"), the contents of the tarballs will be modified to be suitable for
-// a Windows container image.
-func Append(base v1.Image, paths ...string) (v1.Image, error) {
-    if base == nil {
-        return nil, fmt.Errorf("invalid argument: base")
-    }
-
-    win, err := isWindows(base)
-    if err != nil {
-        return nil, fmt.Errorf("getting base image: %w", err)
-    }
-
-    baseMediaType, err := base.MediaType()
-    if err != nil {
-        return nil, fmt.Errorf("getting base image media type: %w", err)
-    }
-
-    layerType := types.DockerLayer
-
-    if baseMediaType == types.OCIManifestSchema1 {
-        layerType = types.OCILayer
-    }
-
-    layers := make([]v1.Layer, 0, len(paths))
-    for _, path := range paths {
-        layer, err := getLayer(path, layerType)
-        if err != nil {
-            return nil, fmt.Errorf("reading layer %q: %w", path, err)
-        }
-
-        if win {
-            layer, err = windows.Windows(layer)
-            if err != nil {
-                return nil, fmt.Errorf("converting %q for Windows: %w", path, err)
-            }
-        }
-
-        layers = append(layers, layer)
-    }
-
-    return mutate.AppendLayers(base, layers...)
-}
-
-func getLayer(path string, layerType types.MediaType) (v1.Layer, error) {
-    f, err := streamFile(path)
-    if err != nil {
-        return nil, err
-    }
-    if f != nil {
-        return stream.NewLayer(f, stream.WithMediaType(layerType)), nil
-    }
-
-    return tarball.LayerFromFile(path, tarball.WithMediaType(layerType))
-}
-
-// If we're dealing with a named pipe, trying to open it multiple times will
-// fail, so we need to do a streaming upload.
-//
-// Returns nil, nil for non-streaming files.
-func streamFile(path string) (*os.File, error) {
-    if path == "-" {
-        return os.Stdin, nil
-    }
-    fi, err := os.Stat(path)
-    if err != nil {
-        return nil, err
-    }
-
-    if !fi.Mode().IsRegular() {
-        return os.Open(path)
-    }
-
-    return nil, nil
-}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/catalog.go b/vendor/github.com/google/go-containerregistry/pkg/crane/catalog.go
deleted file mode 100644
index f30800cca3..0000000000
--- a/vendor/github.com/google/go-containerregistry/pkg/crane/catalog.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2019 Google LLC All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package crane
-
-import (
-    "context"
-
-    "github.com/google/go-containerregistry/pkg/name"
-    "github.com/google/go-containerregistry/pkg/v1/remote"
-)
-
-// Catalog returns the repositories in a registry's catalog.
-func Catalog(src string, opt ...Option) (res []string, err error) {
-    o := makeOptions(opt...)
-    reg, err := name.NewRegistry(src, o.Name...)
-    if err != nil {
-        return nil, err
-    }
-
-    // This context gets overridden by remote.WithContext, which is set by
-    // crane.WithContext.
-    return remote.Catalog(context.Background(), reg, o.Remote...)
-} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/config.go b/vendor/github.com/google/go-containerregistry/pkg/crane/config.go deleted file mode 100644 index 3e55cc93a7..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/config.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -// Config returns the config file for the remote image ref. -func Config(ref string, opt ...Option) ([]byte, error) { - i, _, err := getImage(ref, opt...) - if err != nil { - return nil, err - } - return i.RawConfigFile() -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/copy.go b/vendor/github.com/google/go-containerregistry/pkg/crane/copy.go deleted file mode 100644 index bbdf5481fd..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/copy.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -import ( - "errors" - "fmt" - "net/http" - - "github.com/google/go-containerregistry/pkg/logs" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "golang.org/x/sync/errgroup" -) - -// Copy copies a remote image or index from src to dst. -func Copy(src, dst string, opt ...Option) error { - o := makeOptions(opt...) - srcRef, err := name.ParseReference(src, o.Name...) - if err != nil { - return fmt.Errorf("parsing reference %q: %w", src, err) - } - - dstRef, err := name.ParseReference(dst, o.Name...) - if err != nil { - return fmt.Errorf("parsing reference for %q: %w", dst, err) - } - - puller, err := remote.NewPuller(o.Remote...) - if err != nil { - return err - } - - if tag, ok := dstRef.(name.Tag); ok { - if o.noclobber { - logs.Progress.Printf("Checking existing tag %v", tag) - head, err := puller.Head(o.ctx, tag) - var terr *transport.Error - if errors.As(err, &terr) { - if terr.StatusCode != http.StatusNotFound && terr.StatusCode != http.StatusForbidden { - return err - } - } else if err != nil { - return err - } - - if head != nil { - return fmt.Errorf("refusing to clobber existing tag %s@%s", tag, head.Digest) - } - } - } - - pusher, err := remote.NewPusher(o.Remote...) 
- if err != nil { - return err - } - - logs.Progress.Printf("Copying from %v to %v", srcRef, dstRef) - desc, err := puller.Get(o.ctx, srcRef) - if err != nil { - return fmt.Errorf("fetching %q: %w", src, err) - } - - if o.Platform == nil { - return pusher.Push(o.ctx, dstRef, desc) - } - - // If platform is explicitly set, don't copy the whole index, just the appropriate image. - img, err := desc.Image() - if err != nil { - return err - } - return pusher.Push(o.ctx, dstRef, img) -} - -// CopyRepository copies every tag from src to dst. -func CopyRepository(src, dst string, opt ...Option) error { - o := makeOptions(opt...) - - srcRepo, err := name.NewRepository(src, o.Name...) - if err != nil { - return err - } - - dstRepo, err := name.NewRepository(dst, o.Name...) - if err != nil { - return fmt.Errorf("parsing reference for %q: %w", dst, err) - } - - puller, err := remote.NewPuller(o.Remote...) - if err != nil { - return err - } - - ignoredTags := map[string]struct{}{} - if o.noclobber { - // TODO: It would be good to propagate noclobber down into remote so we can use Etags. - have, err := puller.List(o.ctx, dstRepo) - if err != nil { - var terr *transport.Error - if errors.As(err, &terr) { - // Some registries create repository on first push, so listing tags will fail. - // If we see 404 or 403, assume we failed because the repository hasn't been created yet. - if !(terr.StatusCode == http.StatusNotFound || terr.StatusCode == http.StatusForbidden) { - return err - } - } else { - return err - } - } - for _, tag := range have { - ignoredTags[tag] = struct{}{} - } - } - - pusher, err := remote.NewPusher(o.Remote...) - if err != nil { - return err - } - - lister, err := puller.Lister(o.ctx, srcRepo) - if err != nil { - return err - } - - g, ctx := errgroup.WithContext(o.ctx) - g.SetLimit(o.jobs) - - for lister.HasNext() { - tags, err := lister.Next(ctx) - if err != nil { - return err - } - - for _, tag := range tags.Tags { - tag := tag - - if o.noclobber { - if _, ok := ignoredTags[tag]; ok { - logs.Progress.Printf("Skipping %s due to no-clobber", tag) - continue - } - } - - g.Go(func() error { - srcTag, err := name.ParseReference(src+":"+tag, o.Name...) - if err != nil { - return fmt.Errorf("failed to parse tag: %w", err) - } - dstTag, err := name.ParseReference(dst+":"+tag, o.Name...) - if err != nil { - return fmt.Errorf("failed to parse tag: %w", err) - } - - logs.Progress.Printf("Fetching %s", srcTag) - desc, err := puller.Get(ctx, srcTag) - if err != nil { - return err - } - - logs.Progress.Printf("Pushing %s", dstTag) - return pusher.Push(ctx, dstTag, desc) - }) - } - } - - return g.Wait() -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/delete.go b/vendor/github.com/google/go-containerregistry/pkg/crane/delete.go deleted file mode 100644 index 58a8be1f0b..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/delete.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -import ( - "fmt" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" -) - -// Delete deletes the remote reference at src. -func Delete(src string, opt ...Option) error { - o := makeOptions(opt...) - ref, err := name.ParseReference(src, o.Name...) - if err != nil { - return fmt.Errorf("parsing reference %q: %w", src, err) - } - - return remote.Delete(ref, o.Remote...) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/digest.go b/vendor/github.com/google/go-containerregistry/pkg/crane/digest.go deleted file mode 100644 index 868a57010d..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/digest.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -import "github.com/google/go-containerregistry/pkg/logs" - -// Digest returns the sha256 hash of the remote image at ref. -func Digest(ref string, opt ...Option) (string, error) { - o := makeOptions(opt...) - if o.Platform != nil { - desc, err := getManifest(ref, opt...) - if err != nil { - return "", err - } - if !desc.MediaType.IsIndex() { - return desc.Digest.String(), nil - } - - // TODO: does not work for indexes which contain schema v1 manifests - img, err := desc.Image() - if err != nil { - return "", err - } - digest, err := img.Digest() - if err != nil { - return "", err - } - return digest.String(), nil - } - desc, err := Head(ref, opt...) - if err != nil { - logs.Warn.Printf("HEAD request failed, falling back on GET: %v", err) - rdesc, err := getManifest(ref, opt...) - if err != nil { - return "", err - } - return rdesc.Digest.String(), nil - } - return desc.Digest.String(), nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/doc.go b/vendor/github.com/google/go-containerregistry/pkg/crane/doc.go deleted file mode 100644 index 7602d7953f..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package crane holds libraries used to implement the crane CLI. 
-package crane diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/export.go b/vendor/github.com/google/go-containerregistry/pkg/crane/export.go deleted file mode 100644 index b5e1296d21..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/export.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -import ( - "io" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/mutate" -) - -// Export writes the filesystem contents (as a tarball) of img to w. -// If img has a single layer, just write the (uncompressed) contents to w so -// that this "just works" for images that just wrap a single blob. -func Export(img v1.Image, w io.Writer) error { - layers, err := img.Layers() - if err != nil { - return err - } - if len(layers) == 1 { - // If it's a single layer... - l := layers[0] - mt, err := l.MediaType() - if err != nil { - return err - } - - if !mt.IsLayer() { - // ...and isn't an OCI mediaType, we don't have to flatten it. - // This lets export work for single layer, non-tarball images. - rc, err := l.Uncompressed() - if err != nil { - return err - } - _, err = io.Copy(w, rc) - return err - } - } - fs := mutate.Extract(img) - _, err = io.Copy(w, fs) - return err -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/filemap.go b/vendor/github.com/google/go-containerregistry/pkg/crane/filemap.go deleted file mode 100644 index 36dfc2a644..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/filemap.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -import ( - "archive/tar" - "bytes" - "io" - "sort" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/empty" - "github.com/google/go-containerregistry/pkg/v1/mutate" - "github.com/google/go-containerregistry/pkg/v1/tarball" -) - -// Layer creates a layer from a single file map. These layers are reproducible and consistent. -// A filemap is a path -> file content map representing a file system. 
-func Layer(filemap map[string][]byte) (v1.Layer, error) { - b := &bytes.Buffer{} - w := tar.NewWriter(b) - - fn := []string{} - for f := range filemap { - fn = append(fn, f) - } - sort.Strings(fn) - - for _, f := range fn { - c := filemap[f] - if err := w.WriteHeader(&tar.Header{ - Name: f, - Size: int64(len(c)), - }); err != nil { - return nil, err - } - if _, err := w.Write(c); err != nil { - return nil, err - } - } - if err := w.Close(); err != nil { - return nil, err - } - - // Return a new copy of the buffer each time it's opened. - return tarball.LayerFromOpener(func() (io.ReadCloser, error) { - return io.NopCloser(bytes.NewBuffer(b.Bytes())), nil - }) -} - -// Image creates a image with the given filemaps as its contents. These images are reproducible and consistent. -// A filemap is a path -> file content map representing a file system. -func Image(filemap map[string][]byte) (v1.Image, error) { - y, err := Layer(filemap) - if err != nil { - return nil, err - } - - return mutate.AppendLayers(empty.Image, y) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/get.go b/vendor/github.com/google/go-containerregistry/pkg/crane/get.go deleted file mode 100644 index 98a2e8933e..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/get.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -import ( - "fmt" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote" -) - -func getImage(r string, opt ...Option) (v1.Image, name.Reference, error) { - o := makeOptions(opt...) - ref, err := name.ParseReference(r, o.Name...) - if err != nil { - return nil, nil, fmt.Errorf("parsing reference %q: %w", r, err) - } - img, err := remote.Image(ref, o.Remote...) - if err != nil { - return nil, nil, fmt.Errorf("reading image %q: %w", ref, err) - } - return img, ref, nil -} - -func getManifest(r string, opt ...Option) (*remote.Descriptor, error) { - o := makeOptions(opt...) - ref, err := name.ParseReference(r, o.Name...) - if err != nil { - return nil, fmt.Errorf("parsing reference %q: %w", r, err) - } - return remote.Get(ref, o.Remote...) -} - -// Get calls remote.Get and returns an uninterpreted response. -func Get(r string, opt ...Option) (*remote.Descriptor, error) { - return getManifest(r, opt...) -} - -// Head performs a HEAD request for a manifest and returns a content descriptor -// based on the registry's response. -func Head(r string, opt ...Option) (*v1.Descriptor, error) { - o := makeOptions(opt...) - ref, err := name.ParseReference(r, o.Name...) - if err != nil { - return nil, err - } - return remote.Head(ref, o.Remote...) 
-} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/list.go b/vendor/github.com/google/go-containerregistry/pkg/crane/list.go deleted file mode 100644 index 38352153bb..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/list.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -import ( - "fmt" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" -) - -// ListTags returns the tags in repository src. -func ListTags(src string, opt ...Option) ([]string, error) { - o := makeOptions(opt...) - repo, err := name.NewRepository(src, o.Name...) - if err != nil { - return nil, fmt.Errorf("parsing repo %q: %w", src, err) - } - - return remote.List(repo, o.Remote...) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/manifest.go b/vendor/github.com/google/go-containerregistry/pkg/crane/manifest.go deleted file mode 100644 index a54926aef3..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/manifest.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -// Manifest returns the manifest for the remote image or index ref. -func Manifest(ref string, opt ...Option) ([]byte, error) { - desc, err := getManifest(ref, opt...) - if err != nil { - return nil, err - } - o := makeOptions(opt...) - if o.Platform != nil { - img, err := desc.Image() - if err != nil { - return nil, err - } - return img.RawManifest() - } - return desc.Manifest, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/options.go b/vendor/github.com/google/go-containerregistry/pkg/crane/options.go deleted file mode 100644 index d9d4417619..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/options.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -import ( - "context" - "crypto/tls" - "net/http" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote" -) - -// Options hold the options that crane uses when calling other packages. -type Options struct { - Name []name.Option - Remote []remote.Option - Platform *v1.Platform - Keychain authn.Keychain - Transport http.RoundTripper - - auth authn.Authenticator - insecure bool - jobs int - noclobber bool - ctx context.Context -} - -// GetOptions exposes the underlying []remote.Option, []name.Option, and -// platform, based on the passed Option. Generally, you shouldn't need to use -// this unless you've painted yourself into a dependency corner as we have -// with the crane and gcrane cli packages. -func GetOptions(opts ...Option) Options { - return makeOptions(opts...) -} - -func makeOptions(opts ...Option) Options { - opt := Options{ - Remote: []remote.Option{ - remote.WithAuthFromKeychain(authn.DefaultKeychain), - }, - Keychain: authn.DefaultKeychain, - jobs: 4, - ctx: context.Background(), - } - - for _, o := range opts { - o(&opt) - } - - // Allow for untrusted certificates if the user - // passed Insecure but no custom transport. - if opt.insecure && opt.Transport == nil { - transport := remote.DefaultTransport.(*http.Transport).Clone() - transport.TLSClientConfig = &tls.Config{ - InsecureSkipVerify: true, //nolint: gosec - } - - WithTransport(transport)(&opt) - } else if opt.Transport == nil { - opt.Transport = remote.DefaultTransport - } - - return opt -} - -// Option is a functional option for crane. -type Option func(*Options) - -// WithTransport is a functional option for overriding the default transport -// for remote operations. Setting a transport will override the Insecure option's -// configuration allowing for image registries to use untrusted certificates. -func WithTransport(t http.RoundTripper) Option { - return func(o *Options) { - o.Remote = append(o.Remote, remote.WithTransport(t)) - o.Transport = t - } -} - -// Insecure is an Option that allows image references to be fetched without TLS. -// This will also allow for untrusted (e.g. self-signed) certificates in cases where -// the default transport is used (i.e. when WithTransport is not used). -func Insecure(o *Options) { - o.Name = append(o.Name, name.Insecure) - o.insecure = true -} - -// WithPlatform is an Option to specify the platform. -func WithPlatform(platform *v1.Platform) Option { - return func(o *Options) { - if platform != nil { - o.Remote = append(o.Remote, remote.WithPlatform(*platform)) - } - o.Platform = platform - } -} - -// WithAuthFromKeychain is a functional option for overriding the default -// authenticator for remote operations, using an authn.Keychain to find -// credentials. -// -// By default, crane will use authn.DefaultKeychain. -func WithAuthFromKeychain(keys authn.Keychain) Option { - return func(o *Options) { - // Replace the default keychain at position 0. 
- o.Remote[0] = remote.WithAuthFromKeychain(keys) - o.Keychain = keys - } -} - -// WithAuth is a functional option for overriding the default authenticator -// for remote operations. -// -// By default, crane will use authn.DefaultKeychain. -func WithAuth(auth authn.Authenticator) Option { - return func(o *Options) { - // Replace the default keychain at position 0. - o.Remote[0] = remote.WithAuth(auth) - o.auth = auth - } -} - -// WithUserAgent adds the given string to the User-Agent header for any HTTP -// requests. -func WithUserAgent(ua string) Option { - return func(o *Options) { - o.Remote = append(o.Remote, remote.WithUserAgent(ua)) - } -} - -// WithNondistributable is an option that allows pushing non-distributable -// layers. -func WithNondistributable() Option { - return func(o *Options) { - o.Remote = append(o.Remote, remote.WithNondistributable) - } -} - -// WithContext is a functional option for setting the context. -func WithContext(ctx context.Context) Option { - return func(o *Options) { - o.ctx = ctx - o.Remote = append(o.Remote, remote.WithContext(ctx)) - } -} - -// WithJobs sets the number of concurrent jobs to run. -// -// The default number of jobs is GOMAXPROCS. -func WithJobs(jobs int) Option { - return func(o *Options) { - if jobs > 0 { - o.jobs = jobs - } - o.Remote = append(o.Remote, remote.WithJobs(o.jobs)) - } -} - -// WithNoClobber modifies behavior to avoid overwriting existing tags, if possible. -func WithNoClobber(noclobber bool) Option { - return func(o *Options) { - o.noclobber = noclobber - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/pull.go b/vendor/github.com/google/go-containerregistry/pkg/crane/pull.go deleted file mode 100644 index 7e6e5b7b6e..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/pull.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -import ( - "fmt" - "os" - - legacy "github.com/google/go-containerregistry/pkg/legacy/tarball" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/empty" - "github.com/google/go-containerregistry/pkg/v1/layout" - "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/google/go-containerregistry/pkg/v1/tarball" -) - -// Tag applied to images that were pulled by digest. This denotes that the -// image was (probably) never tagged with this, but lets us avoid applying the -// ":latest" tag which might be misleading. -const iWasADigestTag = "i-was-a-digest" - -// Pull returns a v1.Image of the remote image src. -func Pull(src string, opt ...Option) (v1.Image, error) { - o := makeOptions(opt...) - ref, err := name.ParseReference(src, o.Name...) 
- if err != nil { - return nil, fmt.Errorf("parsing reference %q: %w", src, err) - } - - return remote.Image(ref, o.Remote...) -} - -// Save writes the v1.Image img as a tarball at path with tag src. -func Save(img v1.Image, src, path string) error { - imgMap := map[string]v1.Image{src: img} - return MultiSave(imgMap, path) -} - -// MultiSave writes collection of v1.Image img with tag as a tarball. -func MultiSave(imgMap map[string]v1.Image, path string, opt ...Option) error { - o := makeOptions(opt...) - tagToImage := map[name.Tag]v1.Image{} - - for src, img := range imgMap { - ref, err := name.ParseReference(src, o.Name...) - if err != nil { - return fmt.Errorf("parsing ref %q: %w", src, err) - } - - // WriteToFile wants a tag to write to the tarball, but we might have - // been given a digest. - // If the original ref was a tag, use that. Otherwise, if it was a - // digest, tag the image with :i-was-a-digest instead. - tag, ok := ref.(name.Tag) - if !ok { - d, ok := ref.(name.Digest) - if !ok { - return fmt.Errorf("ref wasn't a tag or digest") - } - tag = d.Repository.Tag(iWasADigestTag) - } - tagToImage[tag] = img - } - // no progress channel (for now) - return tarball.MultiWriteToFile(path, tagToImage) -} - -// PullLayer returns the given layer from a registry. -func PullLayer(ref string, opt ...Option) (v1.Layer, error) { - o := makeOptions(opt...) - digest, err := name.NewDigest(ref, o.Name...) - if err != nil { - return nil, err - } - - return remote.Layer(digest, o.Remote...) -} - -// SaveLegacy writes the v1.Image img as a legacy tarball at path with tag src. -func SaveLegacy(img v1.Image, src, path string) error { - imgMap := map[string]v1.Image{src: img} - return MultiSave(imgMap, path) -} - -// MultiSaveLegacy writes collection of v1.Image img with tag as a legacy tarball. -func MultiSaveLegacy(imgMap map[string]v1.Image, path string) error { - refToImage := map[name.Reference]v1.Image{} - - for src, img := range imgMap { - ref, err := name.ParseReference(src) - if err != nil { - return fmt.Errorf("parsing ref %q: %w", src, err) - } - refToImage[ref] = img - } - - w, err := os.Create(path) - if err != nil { - return err - } - defer w.Close() - - return legacy.MultiWrite(refToImage, w) -} - -// SaveOCI writes the v1.Image img as an OCI Image Layout at path. If a layout -// already exists at that path, it will add the image to the index. -func SaveOCI(img v1.Image, path string) error { - imgMap := map[string]v1.Image{"": img} - return MultiSaveOCI(imgMap, path) -} - -// MultiSaveOCI writes collection of v1.Image img as an OCI Image Layout at path. If a layout -// already exists at that path, it will add the image to the index. -func MultiSaveOCI(imgMap map[string]v1.Image, path string) error { - p, err := layout.FromPath(path) - if err != nil { - p, err = layout.Write(path, empty.Index) - if err != nil { - return err - } - } - for _, img := range imgMap { - if err = p.AppendImage(img); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/push.go b/vendor/github.com/google/go-containerregistry/pkg/crane/push.go deleted file mode 100644 index 90a0585021..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/push.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -import ( - "fmt" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/google/go-containerregistry/pkg/v1/tarball" -) - -// Load reads the tarball at path as a v1.Image. -func Load(path string, opt ...Option) (v1.Image, error) { - return LoadTag(path, "", opt...) -} - -// LoadTag reads a tag from the tarball at path as a v1.Image. -// If tag is "", will attempt to read the tarball as a single image. -func LoadTag(path, tag string, opt ...Option) (v1.Image, error) { - if tag == "" { - return tarball.ImageFromPath(path, nil) - } - - o := makeOptions(opt...) - t, err := name.NewTag(tag, o.Name...) - if err != nil { - return nil, fmt.Errorf("parsing tag %q: %w", tag, err) - } - return tarball.ImageFromPath(path, &t) -} - -// Push pushes the v1.Image img to a registry as dst. -func Push(img v1.Image, dst string, opt ...Option) error { - o := makeOptions(opt...) - tag, err := name.ParseReference(dst, o.Name...) - if err != nil { - return fmt.Errorf("parsing reference %q: %w", dst, err) - } - return remote.Write(tag, img, o.Remote...) -} - -// Upload pushes the v1.Layer to a given repo. -func Upload(layer v1.Layer, repo string, opt ...Option) error { - o := makeOptions(opt...) - ref, err := name.NewRepository(repo, o.Name...) - if err != nil { - return fmt.Errorf("parsing repo %q: %w", repo, err) - } - - return remote.WriteLayer(ref, layer, o.Remote...) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/tag.go b/vendor/github.com/google/go-containerregistry/pkg/crane/tag.go deleted file mode 100644 index 13bc395872..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/crane/tag.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package crane - -import ( - "fmt" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" -) - -// Tag adds tag to the remote img. -func Tag(img, tag string, opt ...Option) error { - o := makeOptions(opt...) - ref, err := name.ParseReference(img, o.Name...) - if err != nil { - return fmt.Errorf("parsing reference %q: %w", img, err) - } - desc, err := remote.Get(ref, o.Remote...) - if err != nil { - return fmt.Errorf("fetching %q: %w", img, err) - } - - dst := ref.Context().Tag(tag) - - return remote.Tag(dst, desc, o.Remote...) 
-} diff --git a/vendor/github.com/google/go-containerregistry/pkg/legacy/config.go b/vendor/github.com/google/go-containerregistry/pkg/legacy/config.go deleted file mode 100644 index 3364bec61c..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/legacy/config.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package legacy - -import ( - v1 "github.com/google/go-containerregistry/pkg/v1" -) - -// LayerConfigFile is the configuration file that holds the metadata describing -// a v1 layer. See: -// https://github.com/moby/moby/blob/master/image/spec/v1.md -type LayerConfigFile struct { - v1.ConfigFile - - ContainerConfig v1.Config `json:"container_config,omitempty"` - - ID string `json:"id,omitempty"` - Parent string `json:"parent,omitempty"` - Throwaway bool `json:"throwaway,omitempty"` - Comment string `json:"comment,omitempty"` -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/legacy/doc.go b/vendor/github.com/google/go-containerregistry/pkg/legacy/doc.go deleted file mode 100644 index 1d1668887a..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/legacy/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package legacy provides functionality to work with docker images in the v1 -// format. -// See: https://github.com/moby/moby/blob/master/image/spec/v1.md -package legacy diff --git a/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/README.md b/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/README.md deleted file mode 100644 index 90b88c7578..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# `legacy/tarball` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/legacy/tarball?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/legacy/tarball) - -This package implements support for writing legacy tarballs, as described -[here](https://github.com/moby/moby/blob/749d90e10f989802638ae542daf54257f3bf71f2/image/spec/v1.2.md#combined-image-json--filesystem-changeset-format). 
diff --git a/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/doc.go b/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/doc.go deleted file mode 100644 index 62684d6e72..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tarball provides facilities for writing v1 docker images -// (https://github.com/moby/moby/blob/master/image/spec/v1.md) from/to a tarball -// on-disk. -package tarball diff --git a/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/write.go b/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/write.go deleted file mode 100644 index 627bfbfdb1..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/write.go +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tarball - -import ( - "archive/tar" - "bytes" - "encoding/json" - "fmt" - "io" - "sort" - "strings" - - "github.com/google/go-containerregistry/pkg/legacy" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/tarball" -) - -// repositoriesTarDescriptor represents the repositories file inside a `docker save` tarball. -type repositoriesTarDescriptor map[string]map[string]string - -// v1Layer represents a layer with metadata needed by the v1 image spec https://github.com/moby/moby/blob/master/image/spec/v1.md. -type v1Layer struct { - // config is the layer metadata. - config *legacy.LayerConfigFile - // layer is the v1.Layer object this v1Layer represents. - layer v1.Layer -} - -// json returns the raw bytes of the json metadata of the given v1Layer. -func (l *v1Layer) json() ([]byte, error) { - return json.Marshal(l.config) -} - -// version returns the raw bytes of the "VERSION" file of the given v1Layer. -func (l *v1Layer) version() []byte { - return []byte("1.0") -} - -// v1LayerID computes the v1 image format layer id for the given v1.Layer with the given v1 parent ID and raw image config. 
-func v1LayerID(layer v1.Layer, parentID string, rawConfig []byte) (string, error) { - d, err := layer.Digest() - if err != nil { - return "", fmt.Errorf("unable to get layer digest to generate v1 layer ID: %w", err) - } - s := fmt.Sprintf("%s %s", d.Hex, parentID) - if len(rawConfig) != 0 { - s = fmt.Sprintf("%s %s", s, string(rawConfig)) - } - - h, _, _ := v1.SHA256(strings.NewReader(s)) - return h.Hex, nil -} - -// newV1Layer creates a new v1Layer for a layer other than the top layer in a v1 image tarball. -func newV1Layer(layer v1.Layer, parent *v1Layer, history v1.History) (*v1Layer, error) { - parentID := "" - if parent != nil { - parentID = parent.config.ID - } - id, err := v1LayerID(layer, parentID, nil) - if err != nil { - return nil, fmt.Errorf("unable to generate v1 layer ID: %w", err) - } - result := &v1Layer{ - layer: layer, - config: &legacy.LayerConfigFile{ - ConfigFile: v1.ConfigFile{ - Created: history.Created, - Author: history.Author, - }, - ContainerConfig: v1.Config{ - Cmd: []string{history.CreatedBy}, - }, - ID: id, - Parent: parentID, - Throwaway: history.EmptyLayer, - Comment: history.Comment, - }, - } - return result, nil -} - -// newTopV1Layer creates a new v1Layer for the top layer in a v1 image tarball. -func newTopV1Layer(layer v1.Layer, parent *v1Layer, history v1.History, imgConfig *v1.ConfigFile, rawConfig []byte) (*v1Layer, error) { - result, err := newV1Layer(layer, parent, history) - if err != nil { - return nil, err - } - id, err := v1LayerID(layer, result.config.Parent, rawConfig) - if err != nil { - return nil, fmt.Errorf("unable to generate v1 layer ID for top layer: %w", err) - } - result.config.ID = id - result.config.Architecture = imgConfig.Architecture - result.config.Container = imgConfig.Container - result.config.DockerVersion = imgConfig.DockerVersion - result.config.OS = imgConfig.OS - result.config.Config = imgConfig.Config - result.config.Created = imgConfig.Created - return result, nil -} - -// splitTag splits the given tagged image name <registry>/<repo name>:<tag> -// into <registry>/<repo name> and <tag>. -func splitTag(name string) (string, string) { - // Split on ":" - parts := strings.Split(name, ":") - // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation. - if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], "/") { - base := strings.Join(parts[:len(parts)-1], ":") - tag := parts[len(parts)-1] - return base, tag - } - return name, "" -} - -// addTags adds the given image tags to the given "repositories" file descriptor in a v1 image tarball. -func addTags(repos repositoriesTarDescriptor, tags []string, topLayerID string) { - for _, t := range tags { - base, tag := splitTag(t) - tagToID, ok := repos[base] - if !ok { - tagToID = make(map[string]string) - repos[base] = tagToID - } - tagToID[tag] = topLayerID - } -} - -// updateLayerSources updates the given layer digest to descriptor map with the descriptor of the given layer in the given image if it's an undistributable layer. -func updateLayerSources(layerSources map[v1.Hash]v1.Descriptor, layer v1.Layer, img v1.Image) error { - d, err := layer.Digest() - if err != nil { - return err - } - // Add to LayerSources if it's a foreign layer. - desc, err := partial.BlobDescriptor(img, d) - if err != nil { - return err - } - if !desc.MediaType.IsDistributable() { - diffid, err := partial.BlobToDiffID(img, d) - if err != nil { - return err - } - layerSources[diffid] = *desc - } - return nil -} - -// Write is a wrapper to write a single image in V1 format and tag to a tarball.
-func Write(ref name.Reference, img v1.Image, w io.Writer) error { - return MultiWrite(map[name.Reference]v1.Image{ref: img}, w) -} - -// filterEmpty filters out the history corresponding to empty layers from the -// given history. -func filterEmpty(h []v1.History) []v1.History { - result := []v1.History{} - for _, i := range h { - if i.EmptyLayer { - continue - } - result = append(result, i) - } - return result -} - -// MultiWrite writes the contents of each image to the provided writer, in the V1 image tarball format. -// The contents are written in the following format: -// One manifest.json file at the top level containing information about several images. -// One repositories file mapping from the image <registry>/<repo name> to the id of the top most layer. -// For every layer, a directory named with the layer ID is created with the following contents: -// -// layer.tar - The uncompressed layer tarball. -// <layer id>.json - Layer metadata json. -// VERSION - Schema version string. Always set to "1.0". -// -// One file for the config blob, named after its SHA. -func MultiWrite(refToImage map[name.Reference]v1.Image, w io.Writer) error { - tf := tar.NewWriter(w) - defer tf.Close() - - sortedImages, imageToTags := dedupRefToImage(refToImage) - var m tarball.Manifest - repos := make(repositoriesTarDescriptor) - - seenLayerIDs := make(map[string]struct{}) - for _, img := range sortedImages { - tags := imageToTags[img] - - // Write the config. - cfgName, err := img.ConfigName() - if err != nil { - return err - } - cfgFileName := fmt.Sprintf("%s.json", cfgName.Hex) - cfgBlob, err := img.RawConfigFile() - if err != nil { - return err - } - if err := writeTarEntry(tf, cfgFileName, bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil { - return err - } - cfg, err := img.ConfigFile() - if err != nil { - return err - } - - // Store foreign layer info. - layerSources := make(map[v1.Hash]v1.Descriptor) - - // Write the layers. - layers, err := img.Layers() - if err != nil { - return err - } - history := filterEmpty(cfg.History) - // Create a blank config history if the config didn't have a history. - if len(history) == 0 && len(layers) != 0 { - history = make([]v1.History, len(layers)) - } else if len(layers) != len(history) { - return fmt.Errorf("image config had layer history which did not match the number of layers, got len(history)=%d, len(layers)=%d, want len(history)=len(layers)", len(history), len(layers)) - } - layerFiles := make([]string, len(layers)) - var prev *v1Layer - for i, l := range layers { - if err := updateLayerSources(layerSources, l, img); err != nil { - return fmt.Errorf("unable to update image metadata to include undistributable layer source information: %w", err) - } - var cur *v1Layer - if i < (len(layers) - 1) { - cur, err = newV1Layer(l, prev, history[i]) - } else { - cur, err = newTopV1Layer(l, prev, history[i], cfg, cfgBlob) - } - if err != nil { - return err - } - layerFiles[i] = fmt.Sprintf("%s/layer.tar", cur.config.ID) - if _, ok := seenLayerIDs[cur.config.ID]; ok { - prev = cur - continue - } - seenLayerIDs[cur.config.ID] = struct{}{} - - // If the v1.Layer implements UncompressedSize efficiently, use that - // for the tar header. Otherwise, this iterates over Uncompressed(). - // NOTE: If using a streaming layer, this may consume the layer.
- size, err := partial.UncompressedSize(l) - if err != nil { - return err - } - u, err := l.Uncompressed() - if err != nil { - return err - } - defer u.Close() - if err := writeTarEntry(tf, layerFiles[i], u, size); err != nil { - return err - } - - j, err := cur.json() - if err != nil { - return err - } - if err := writeTarEntry(tf, fmt.Sprintf("%s/json", cur.config.ID), bytes.NewReader(j), int64(len(j))); err != nil { - return err - } - v := cur.version() - if err := writeTarEntry(tf, fmt.Sprintf("%s/VERSION", cur.config.ID), bytes.NewReader(v), int64(len(v))); err != nil { - return err - } - prev = cur - } - - // Generate the tar descriptor and write it. - m = append(m, tarball.Descriptor{ - Config: cfgFileName, - RepoTags: tags, - Layers: layerFiles, - LayerSources: layerSources, - }) - // prev should be the top layer here. Use it to add the image tags - // to the tarball repositories file. - addTags(repos, tags, prev.config.ID) - } - - mBytes, err := json.Marshal(m) - if err != nil { - return err - } - - if err := writeTarEntry(tf, "manifest.json", bytes.NewReader(mBytes), int64(len(mBytes))); err != nil { - return err - } - reposBytes, err := json.Marshal(&repos) - if err != nil { - return err - } - return writeTarEntry(tf, "repositories", bytes.NewReader(reposBytes), int64(len(reposBytes))) -} - -func dedupRefToImage(refToImage map[name.Reference]v1.Image) ([]v1.Image, map[v1.Image][]string) { - imageToTags := make(map[v1.Image][]string) - - for ref, img := range refToImage { - if tag, ok := ref.(name.Tag); ok { - if tags, ok := imageToTags[img]; ok && tags != nil { - imageToTags[img] = append(tags, tag.String()) - } else { - imageToTags[img] = []string{tag.String()} - } - } else { - if _, ok := imageToTags[img]; !ok { - imageToTags[img] = nil - } - } - } - - // Force specific order on tags - imgs := []v1.Image{} - for img, tags := range imageToTags { - sort.Strings(tags) - imgs = append(imgs, img) - } - - sort.Slice(imgs, func(i, j int) bool { - cfI, err := imgs[i].ConfigName() - if err != nil { - return false - } - cfJ, err := imgs[j].ConfigName() - if err != nil { - return false - } - return cfI.Hex < cfJ.Hex - }) - - return imgs, imageToTags -} - -// Writes a file to the provided writer with a corresponding tar header -func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { - hdr := &tar.Header{ - Mode: 0644, - Typeflag: tar.TypeReg, - Size: size, - Name: path, - } - if err := tf.WriteHeader(hdr); err != nil { - return err - } - _, err := io.Copy(tf, r) - return err -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go b/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go deleted file mode 100644 index a5d25b1887..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package logs exposes the loggers used by this library. 
-package logs - -import ( - "io" - "log" -) - -var ( - // Warn is used to log non-fatal errors. - Warn = log.New(io.Discard, "", log.LstdFlags) - - // Progress is used to log notable, successful events. - Progress = log.New(io.Discard, "", log.LstdFlags) - - // Debug is used to log information that is useful for debugging. - Debug = log.New(io.Discard, "", log.LstdFlags) -) - -// Enabled checks to see if the logger's writer is set to something other -// than io.Discard. This allows callers to avoid expensive operations -// that will end up in /dev/null anyway. -func Enabled(l *log.Logger) bool { - return l.Writer() != io.Discard -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/README.md b/vendor/github.com/google/go-containerregistry/pkg/name/README.md deleted file mode 100644 index 4889b8446a..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# `name` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/name?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/name) diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/check.go b/vendor/github.com/google/go-containerregistry/pkg/name/check.go deleted file mode 100644 index e9a240a3e5..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/check.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "strings" - "unicode/utf8" -) - -// stripRunesFn returns a function which returns -1 (i.e. a value which -// signals deletion in strings.Map) for runes in 'runes', and the rune otherwise. -func stripRunesFn(runes string) func(rune) rune { - return func(r rune) rune { - if strings.ContainsRune(runes, r) { - return -1 - } - return r - } -} - -// checkElement checks a given named element matches character and length restrictions. -// Returns true if the given element adheres to the given restrictions, false otherwise. -func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error { - numRunes := utf8.RuneCountInString(element) - if (numRunes < minRunes) || (maxRunes < numRunes) { - return newErrBadName("%s must be between %d and %d characters in length: %s", name, minRunes, maxRunes, element) - } else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 { - return newErrBadName("%s can only contain the characters `%s`: %s", name, allowedRunes, element) - } - return nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go deleted file mode 100644 index c049c1ef48..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - // nolint: depguard - _ "crypto/sha256" // Recommended by go-digest. - "strings" - - "github.com/opencontainers/go-digest" -) - -const digestDelim = "@" - -// Digest stores a digest name in a structured form. -type Digest struct { - Repository - digest string - original string -} - -// Ensure Digest implements Reference -var _ Reference = (*Digest)(nil) - -// Context implements Reference. -func (d Digest) Context() Repository { - return d.Repository -} - -// Identifier implements Reference. -func (d Digest) Identifier() string { - return d.DigestStr() -} - -// DigestStr returns the digest component of the Digest. -func (d Digest) DigestStr() string { - return d.digest -} - -// Name returns the name from which the Digest was derived. -func (d Digest) Name() string { - return d.Repository.Name() + digestDelim + d.DigestStr() -} - -// String returns the original input string. -func (d Digest) String() string { - return d.original -} - -// NewDigest returns a new Digest representing the given name. -func NewDigest(name string, opts ...Option) (Digest, error) { - // Split on "@" - parts := strings.Split(name, digestDelim) - if len(parts) != 2 { - return Digest{}, newErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name) - } - base := parts[0] - dig := parts[1] - prefix := digest.Canonical.String() + ":" - if !strings.HasPrefix(dig, prefix) { - return Digest{}, newErrBadName("unsupported digest algorithm: %s", dig) - } - hex := strings.TrimPrefix(dig, prefix) - if err := digest.Canonical.Validate(hex); err != nil { - return Digest{}, err - } - - tag, err := NewTag(base, opts...) - if err == nil { - base = tag.Repository.Name() - } - - repo, err := NewRepository(base, opts...) - if err != nil { - return Digest{}, err - } - return Digest{ - Repository: repo, - digest: dig, - original: name, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/doc.go b/vendor/github.com/google/go-containerregistry/pkg/name/doc.go deleted file mode 100644 index b294794dc1..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package name defines structured types for representing image references. -// -// What's in a name? 
For image references, not nearly enough! -// -// Image references look a lot like URLs, but they differ in that they don't -// contain the scheme (http or https), they can end with a :tag or a @digest -// (the latter being validated), and they perform defaulting for missing -// components. -// -// Since image references don't contain the scheme, we do our best to infer -// if we use http or https from the given hostname. We allow http fallback for -// any host that looks like localhost (localhost, 127.0.0.1, ::1), ends in -// ".local", or is in the "private" address space per RFC 1918. For everything -// else, we assume https only. To override this heuristic, use the Insecure -// option. -// -// Image references with a digest signal to us that we should verify the content -// of the image matches the digest. E.g. when pulling a Digest reference, we'll -// calculate the sha256 of the manifest returned by the registry and error out -// if it doesn't match what we asked for. -// -// For defaulting, we interpret "ubuntu" as -// "index.docker.io/library/ubuntu:latest" because we add the missing repo -// "library", the missing registry "index.docker.io", and the missing tag -// "latest". To disable this defaulting, use the StrictValidation option. This -// is useful e.g. to only allow image references that explicitly set a tag or -// digest, so that you don't accidentally pull "latest". -package name diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/errors.go b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go deleted file mode 100644 index bf004ffcfb..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/errors.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "errors" - "fmt" -) - -// ErrBadName is an error for when a bad docker name is supplied. -type ErrBadName struct { - info string -} - -func (e *ErrBadName) Error() string { - return e.info -} - -// Is reports whether target is an error of type ErrBadName -func (e *ErrBadName) Is(target error) bool { - var berr *ErrBadName - return errors.As(target, &berr) -} - -// newErrBadName returns a ErrBadName which returns the given formatted string from Error(). -func newErrBadName(fmtStr string, args ...any) *ErrBadName { - return &ErrBadName{fmt.Sprintf(fmtStr, args...)} -} - -// IsErrBadName returns true if the given error is an ErrBadName. -// -// Deprecated: Use errors.Is. -func IsErrBadName(err error) bool { - var berr *ErrBadName - return errors.As(err, &berr) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/options.go b/vendor/github.com/google/go-containerregistry/pkg/name/options.go deleted file mode 100644 index d14fedcdad..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/options.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -const ( - // DefaultRegistry is the registry name that will be used if no registry - // provided and the default is not overridden. - DefaultRegistry = "index.docker.io" - defaultRegistryAlias = "docker.io" - - // DefaultTag is the tag name that will be used if no tag provided and the - // default is not overridden. - DefaultTag = "latest" -) - -type options struct { - strict bool // weak by default - insecure bool // secure by default - defaultRegistry string - defaultTag string -} - -func makeOptions(opts ...Option) options { - opt := options{ - defaultRegistry: DefaultRegistry, - defaultTag: DefaultTag, - } - for _, o := range opts { - o(&opt) - } - return opt -} - -// Option is a functional option for name parsing. -type Option func(*options) - -// StrictValidation is an Option that requires image references to be fully -// specified; i.e. no defaulting for registry (dockerhub), repo (library), -// or tag (latest). -func StrictValidation(opts *options) { - opts.strict = true -} - -// WeakValidation is an Option that sets defaults when parsing names, see -// StrictValidation. -func WeakValidation(opts *options) { - opts.strict = false -} - -// Insecure is an Option that allows image references to be fetched without TLS. -func Insecure(opts *options) { - opts.insecure = true -} - -// OptionFn is a function that returns an option. -type OptionFn func() Option - -// WithDefaultRegistry sets the default registry that will be used if one is not -// provided. -func WithDefaultRegistry(r string) Option { - return func(opts *options) { - opts.defaultRegistry = r - } -} - -// WithDefaultTag sets the default tag that will be used if one is not provided. -func WithDefaultTag(t string) Option { - return func(opts *options) { - opts.defaultTag = t - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go deleted file mode 100644 index 912ab33018..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "fmt" -) - -// Reference defines the interface that consumers use when they can -// take either a tag or a digest. -type Reference interface { - fmt.Stringer - - // Context accesses the Repository context of the reference. 
- Context() Repository - - // Identifier accesses the type-specific portion of the reference. - Identifier() string - - // Name is the fully-qualified reference name. - Name() string - - // Scope is the scope needed to access this reference. - Scope(string) string -} - -// ParseReference parses the string as a reference, either by tag or digest. -func ParseReference(s string, opts ...Option) (Reference, error) { - if t, err := NewTag(s, opts...); err == nil { - return t, nil - } - if d, err := NewDigest(s, opts...); err == nil { - return d, nil - } - return nil, newErrBadName("could not parse reference: " + s) -} - -type stringConst string - -// MustParseReference behaves like ParseReference, but panics instead of -// returning an error. It's intended for use in tests, or when a value is -// expected to be valid at code authoring time. -// -// To discourage its use in scenarios where the value is not known at code -// authoring time, it must be passed a string constant: -// -// const str = "valid/string" -// MustParseReference(str) -// MustParseReference("another/valid/string") -// MustParseReference(str + "/and/more") -// -// These will not compile: -// -// var str = "valid/string" -// MustParseReference(str) -// MustParseReference(strings.Join([]string{"valid", "string"}, "/")) -func MustParseReference(s stringConst, opts ...Option) Reference { - ref, err := ParseReference(string(s), opts...) - if err != nil { - panic(err) - } - return ref -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go deleted file mode 100644 index 5b0d01769c..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "net" - "net/url" - "path" - "regexp" - "strings" -) - -// Detect more complex forms of local references. -var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`) - -// Detect the loopback IP (127.0.0.1) -var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1")) - -// Detect the loopback IPV6 (::1) -var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1")) - -// Registry stores a docker registry name in a structured form. -type Registry struct { - insecure bool - registry string -} - -// RegistryStr returns the registry component of the Registry. -func (r Registry) RegistryStr() string { - return r.registry -} - -// Name returns the name from which the Registry was derived. -func (r Registry) Name() string { - return r.RegistryStr() -} - -func (r Registry) String() string { - return r.Name() -} - -// Repo returns a Repository in the Registry with the given name. -func (r Registry) Repo(repo ...string) Repository { - return Repository{Registry: r, repository: path.Join(repo...)} -} - -// Scope returns the scope required to access the registry. 
-func (r Registry) Scope(string) string { - // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z - return "registry:catalog:*" -} - -func (r Registry) isRFC1918() bool { - ipStr := strings.Split(r.Name(), ":")[0] - ip := net.ParseIP(ipStr) - if ip == nil { - return false - } - for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} { - _, block, _ := net.ParseCIDR(cidr) - if block.Contains(ip) { - return true - } - } - return false -} - -// Scheme returns https scheme for all the endpoints except localhost or when explicitly defined. -func (r Registry) Scheme() string { - if r.insecure { - return "http" - } - if r.isRFC1918() { - return "http" - } - if strings.HasPrefix(r.Name(), "localhost:") { - return "http" - } - if reLocal.MatchString(r.Name()) { - return "http" - } - if reLoopback.MatchString(r.Name()) { - return "http" - } - if reipv6Loopback.MatchString(r.Name()) { - return "http" - } - return "https" -} - -func checkRegistry(name string) error { - // Per RFC 3986, registries (authorities) are required to be prefixed with "//" - // url.Host == hostname[:port] == authority - if url, err := url.Parse("//" + name); err != nil || url.Host != name { - return newErrBadName("registries must be valid RFC 3986 URI authorities: %s", name) - } - return nil -} - -// NewRegistry returns a Registry based on the given name. -// Strict validation requires explicit, valid RFC 3986 URI authorities to be given. -func NewRegistry(name string, opts ...Option) (Registry, error) { - opt := makeOptions(opts...) - if opt.strict && len(name) == 0 { - return Registry{}, newErrBadName("strict validation requires the registry to be explicitly defined") - } - - if err := checkRegistry(name); err != nil { - return Registry{}, err - } - - if name == "" { - name = opt.defaultRegistry - } - // Rewrite "docker.io" to "index.docker.io". - // See: https://github.com/google/go-containerregistry/issues/68 - if name == defaultRegistryAlias { - name = DefaultRegistry - } - - return Registry{registry: name, insecure: opt.insecure}, nil -} - -// NewInsecureRegistry returns an Insecure Registry based on the given name. -// -// Deprecated: Use the Insecure Option with NewRegistry instead. -func NewInsecureRegistry(name string, opts ...Option) (Registry, error) { - opts = append(opts, Insecure) - return NewRegistry(name, opts...) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go deleted file mode 100644 index 9250e36252..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/repository.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
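The Scheme heuristic deleted above is easy to observe directly. A small sketch, assuming the upstream module path; the hostnames are placeholder values chosen to trip each branch of the heuristic:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// Public host, localhost with port, RFC 1918 address, and a ".local" name.
	for _, host := range []string{"gcr.io", "localhost:5000", "10.0.0.7:5000", "registry.local"} {
		reg, err := name.NewRegistry(host)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %s\n", reg.Name(), reg.Scheme())
		// gcr.io -> https; the other three -> http
	}

	// Insecure forces http even for a public hostname.
	reg, err := name.NewRegistry("gcr.io", name.Insecure)
	if err != nil {
		panic(err)
	}
	fmt.Println(reg.Scheme()) // http
}
```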
- -package name - -import ( - "fmt" - "strings" -) - -const ( - defaultNamespace = "library" - repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./" - regRepoDelimiter = "/" -) - -// Repository stores a docker repository name in a structured form. -type Repository struct { - Registry - repository string -} - -// See https://docs.docker.com/docker-hub/official_repos -func hasImplicitNamespace(repo string, reg Registry) bool { - return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry -} - -// RepositoryStr returns the repository component of the Repository. -func (r Repository) RepositoryStr() string { - if hasImplicitNamespace(r.repository, r.Registry) { - return fmt.Sprintf("%s/%s", defaultNamespace, r.repository) - } - return r.repository -} - -// Name returns the name from which the Repository was derived. -func (r Repository) Name() string { - regName := r.Registry.Name() - if regName != "" { - return regName + regRepoDelimiter + r.RepositoryStr() - } - // TODO: As far as I can tell, this is unreachable. - return r.RepositoryStr() -} - -func (r Repository) String() string { - return r.Name() -} - -// Scope returns the scope required to perform the given action on the registry. -// TODO(jonjohnsonjr): consider moving scopes to a separate package. -func (r Repository) Scope(action string) string { - return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action) -} - -func checkRepository(repository string) error { - return checkElement("repository", repository, repositoryChars, 2, 255) -} - -// NewRepository returns a new Repository representing the given name, according to the given strictness. -func NewRepository(name string, opts ...Option) (Repository, error) { - opt := makeOptions(opts...) - if len(name) == 0 { - return Repository{}, newErrBadName("a repository name must be specified") - } - - var registry string - repo := name - parts := strings.SplitN(name, regRepoDelimiter, 2) - if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) { - // The first part of the repository is treated as the registry domain - // iff it contains a '.' or ':' character, otherwise it is all repository - // and the domain defaults to Docker Hub. - registry = parts[0] - repo = parts[1] - } - - if err := checkRepository(repo); err != nil { - return Repository{}, err - } - - reg, err := NewRegistry(registry, opts...) - if err != nil { - return Repository{}, err - } - if hasImplicitNamespace(repo, reg) && opt.strict { - return Repository{}, newErrBadName("strict validation requires the full repository path (missing 'library')") - } - return Repository{reg, repo}, nil -} - -// Tag returns a Tag in this Repository. -func (r Repository) Tag(identifier string) Tag { - t := Tag{ - tag: identifier, - Repository: r, - } - t.original = t.Name() - return t -} - -// Digest returns a Digest in this Repository. -func (r Repository) Digest(identifier string) Digest { - d := Digest{ - digest: identifier, - Repository: r, - } - d.original = d.Name() - return d -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/tag.go b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go deleted file mode 100644 index 66bd1bec3d..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/name/tag.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. 
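The repository defaulting removed above (the implicit index.docker.io registry and library namespace, plus the Tag and Digest helpers) might be used like this; a sketch assuming the upstream module path:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	repo, err := name.NewRepository("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(repo.Name())          // index.docker.io/library/ubuntu
	fmt.Println(repo.RepositoryStr()) // library/ubuntu

	// Derive references inside the repository without re-parsing.
	fmt.Println(repo.Tag("22.04").Name()) // index.docker.io/library/ubuntu:22.04
}
```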
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "strings" -) - -const ( - // TODO(dekkagaijin): use the docker/distribution regexes for validation. - tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ" - tagDelim = ":" -) - -// Tag stores a docker tag name in a structured form. -type Tag struct { - Repository - tag string - original string -} - -// Ensure Tag implements Reference -var _ Reference = (*Tag)(nil) - -// Context implements Reference. -func (t Tag) Context() Repository { - return t.Repository -} - -// Identifier implements Reference. -func (t Tag) Identifier() string { - return t.TagStr() -} - -// TagStr returns the tag component of the Tag. -func (t Tag) TagStr() string { - return t.tag -} - -// Name returns the name from which the Tag was derived. -func (t Tag) Name() string { - return t.Repository.Name() + tagDelim + t.TagStr() -} - -// String returns the original input string. -func (t Tag) String() string { - return t.original -} - -// Scope returns the scope required to perform the given action on the tag. -func (t Tag) Scope(action string) string { - return t.Repository.Scope(action) -} - -func checkTag(name string) error { - return checkElement("tag", name, tagChars, 1, 128) -} - -// NewTag returns a new Tag representing the given name, according to the given strictness. -func NewTag(name string, opts ...Option) (Tag, error) { - opt := makeOptions(opts...) - base := name - tag := "" - - // Split on ":" - parts := strings.Split(name, tagDelim) - // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation. - if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) { - base = strings.Join(parts[:len(parts)-1], tagDelim) - tag = parts[len(parts)-1] - } - - // We don't require a tag, but if we get one check it's valid, - // even when not being strict. - // If we are being strict, we want to validate the tag regardless in case - // it's empty. - if tag != "" || opt.strict { - if err := checkTag(tag); err != nil { - return Tag{}, err - } - } - - if tag == "" { - tag = opt.defaultTag - } - - repo, err := NewRepository(base, opts...) - if err != nil { - return Tag{}, err - } - return Tag{ - Repository: repo, - tag: tag, - original: name, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/registry/README.md b/vendor/github.com/google/go-containerregistry/pkg/registry/README.md deleted file mode 100644 index 5e58bbcd5c..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/registry/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# `pkg/registry` - -This package implements a Docker v2 registry and the OCI distribution specification. - -It is designed to be used anywhere a low dependency container registry is needed, with an initial focus on tests. - -Its goal is to be standards compliant and its strictness will increase over time. - -This is currently a low flightmiles system. 
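NewTag's split-on-colon logic above has to distinguish a tag from a registry port. A sketch of how the three cases come out, assuming the upstream module path:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	for _, s := range []string{"app:v1", "localhost:5000/app", "localhost:5000/app:v1"} {
		t, err := name.NewTag(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-21s -> repo=%s tag=%s\n", s, t.RepositoryStr(), t.TagStr())
	}
	// app:v1                -> repo=library/app tag=v1
	// localhost:5000/app    -> repo=app tag=latest   (":5000" is a port, not a tag)
	// localhost:5000/app:v1 -> repo=app tag=v1
}
```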
It's likely quite safe to use in tests; If you're using it in production, please let us know how and send us PRs for integration tests. - -Before sending a PR, understand that the expectation of this package is that it remain free of extraneous dependencies. -This means that we expect `pkg/registry` to only have dependencies on Go's standard library, and other packages in `go-containerregistry`. - -You may be asked to change your code to reduce dependencies, and your PR might be rejected if this is deemed impossible. diff --git a/vendor/github.com/google/go-containerregistry/pkg/registry/blobs.go b/vendor/github.com/google/go-containerregistry/pkg/registry/blobs.go deleted file mode 100644 index 8386ffdf9e..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/registry/blobs.go +++ /dev/null @@ -1,485 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package registry - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "log" - "math/rand" - "net/http" - "path" - "strings" - "sync" - - "github.com/google/go-containerregistry/internal/verify" - v1 "github.com/google/go-containerregistry/pkg/v1" -) - -// Returns whether this url should be handled by the blob handler -// This is complicated because blob is indicated by the trailing path, not the leading path. -// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pulling-a-layer -// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pushing-a-layer -func isBlob(req *http.Request) bool { - elem := strings.Split(req.URL.Path, "/") - elem = elem[1:] - if elem[len(elem)-1] == "" { - elem = elem[:len(elem)-1] - } - if len(elem) < 3 { - return false - } - return elem[len(elem)-2] == "blobs" || (elem[len(elem)-3] == "blobs" && - elem[len(elem)-2] == "uploads") -} - -// BlobHandler represents a minimal blob storage backend, capable of serving -// blob contents. -type BlobHandler interface { - // Get gets the blob contents, or errNotFound if the blob wasn't found. - Get(ctx context.Context, repo string, h v1.Hash) (io.ReadCloser, error) -} - -// BlobStatHandler is an extension interface representing a blob storage -// backend that can serve metadata about blobs. -type BlobStatHandler interface { - // Stat returns the size of the blob, or errNotFound if the blob wasn't - // found, or redirectError if the blob can be found elsewhere. - Stat(ctx context.Context, repo string, h v1.Hash) (int64, error) -} - -// BlobPutHandler is an extension interface representing a blob storage backend -// that can write blob contents. -type BlobPutHandler interface { - // Put puts the blob contents. - // - // The contents will be verified against the expected size and digest - // as the contents are read, and an error will be returned if these - // don't match. 
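Since the README above pitches this package for tests, here is a minimal sketch of the usual wiring, assuming the upstream module path: serve the handler from net/http/httptest and hit the version-check endpoint.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/google/go-containerregistry/pkg/registry"
)

func main() {
	s := httptest.NewServer(registry.New())
	defer s.Close()

	// The API version check is the simplest end-to-end smoke test.
	resp, err := http.Get(s.URL + "/v2/")
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.StatusCode) // 200
}
```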
Implementations should return that error, or a wrapper - // around that error, to return the correct error when these don't match. - Put(ctx context.Context, repo string, h v1.Hash, rc io.ReadCloser) error -} - -// BlobDeleteHandler is an extension interface representing a blob storage -// backend that can delete blob contents. -type BlobDeleteHandler interface { - // Delete the blob contents. - Delete(ctx context.Context, repo string, h v1.Hash) error -} - -// redirectError represents a signal that the blob handler doesn't have the blob -// contents, but that those contents are at another location which registry -// clients should redirect to. -type redirectError struct { - // Location is the location to find the contents. - Location string - - // Code is the HTTP redirect status code to return to clients. - Code int -} - -func (e redirectError) Error() string { return fmt.Sprintf("redirecting (%d): %s", e.Code, e.Location) } - -// errNotFound represents an error locating the blob. -var errNotFound = errors.New("not found") - -type memHandler struct { - m map[string][]byte - lock sync.Mutex -} - -func NewInMemoryBlobHandler() BlobHandler { return &memHandler{m: map[string][]byte{}} } - -func (m *memHandler) Stat(_ context.Context, _ string, h v1.Hash) (int64, error) { - m.lock.Lock() - defer m.lock.Unlock() - - b, found := m.m[h.String()] - if !found { - return 0, errNotFound - } - return int64(len(b)), nil -} -func (m *memHandler) Get(_ context.Context, _ string, h v1.Hash) (io.ReadCloser, error) { - m.lock.Lock() - defer m.lock.Unlock() - - b, found := m.m[h.String()] - if !found { - return nil, errNotFound - } - return io.NopCloser(bytes.NewReader(b)), nil -} -func (m *memHandler) Put(_ context.Context, _ string, h v1.Hash, rc io.ReadCloser) error { - m.lock.Lock() - defer m.lock.Unlock() - - defer rc.Close() - all, err := io.ReadAll(rc) - if err != nil { - return err - } - m.m[h.String()] = all - return nil -} -func (m *memHandler) Delete(_ context.Context, _ string, h v1.Hash) error { - m.lock.Lock() - defer m.lock.Unlock() - - if _, found := m.m[h.String()]; !found { - return errNotFound - } - - delete(m.m, h.String()) - return nil -} - -// blobs -type blobs struct { - blobHandler BlobHandler - - // Each upload gets a unique id that writes occur to until finalized. - uploads map[string][]byte - lock sync.Mutex - log *log.Logger -} - -func (b *blobs) handle(resp http.ResponseWriter, req *http.Request) *regError { - elem := strings.Split(req.URL.Path, "/") - elem = elem[1:] - if elem[len(elem)-1] == "" { - elem = elem[:len(elem)-1] - } - // Must have a path of form /v2/{name}/blobs/{upload,sha256:} - if len(elem) < 4 { - return ®Error{ - Status: http.StatusBadRequest, - Code: "NAME_INVALID", - Message: "blobs must be attached to a repo", - } - } - target := elem[len(elem)-1] - service := elem[len(elem)-2] - digest := req.URL.Query().Get("digest") - contentRange := req.Header.Get("Content-Range") - - repo := req.URL.Host + path.Join(elem[1:len(elem)-2]...) 
- - switch req.Method { - case http.MethodHead: - h, err := v1.NewHash(target) - if err != nil { - return ®Error{ - Status: http.StatusBadRequest, - Code: "NAME_INVALID", - Message: "invalid digest", - } - } - - var size int64 - if bsh, ok := b.blobHandler.(BlobStatHandler); ok { - size, err = bsh.Stat(req.Context(), repo, h) - if errors.Is(err, errNotFound) { - return regErrBlobUnknown - } else if err != nil { - var rerr redirectError - if errors.As(err, &rerr) { - http.Redirect(resp, req, rerr.Location, rerr.Code) - return nil - } - return regErrInternal(err) - } - } else { - rc, err := b.blobHandler.Get(req.Context(), repo, h) - if errors.Is(err, errNotFound) { - return regErrBlobUnknown - } else if err != nil { - var rerr redirectError - if errors.As(err, &rerr) { - http.Redirect(resp, req, rerr.Location, rerr.Code) - return nil - } - return regErrInternal(err) - } - defer rc.Close() - size, err = io.Copy(io.Discard, rc) - if err != nil { - return regErrInternal(err) - } - } - - resp.Header().Set("Content-Length", fmt.Sprint(size)) - resp.Header().Set("Docker-Content-Digest", h.String()) - resp.WriteHeader(http.StatusOK) - return nil - - case http.MethodGet: - h, err := v1.NewHash(target) - if err != nil { - return ®Error{ - Status: http.StatusBadRequest, - Code: "NAME_INVALID", - Message: "invalid digest", - } - } - - var size int64 - var r io.Reader - if bsh, ok := b.blobHandler.(BlobStatHandler); ok { - size, err = bsh.Stat(req.Context(), repo, h) - if errors.Is(err, errNotFound) { - return regErrBlobUnknown - } else if err != nil { - var rerr redirectError - if errors.As(err, &rerr) { - http.Redirect(resp, req, rerr.Location, rerr.Code) - return nil - } - return regErrInternal(err) - } - - rc, err := b.blobHandler.Get(req.Context(), repo, h) - if errors.Is(err, errNotFound) { - return regErrBlobUnknown - } else if err != nil { - var rerr redirectError - if errors.As(err, &rerr) { - http.Redirect(resp, req, rerr.Location, rerr.Code) - return nil - } - - return regErrInternal(err) - } - defer rc.Close() - r = rc - } else { - tmp, err := b.blobHandler.Get(req.Context(), repo, h) - if errors.Is(err, errNotFound) { - return regErrBlobUnknown - } else if err != nil { - var rerr redirectError - if errors.As(err, &rerr) { - http.Redirect(resp, req, rerr.Location, rerr.Code) - return nil - } - - return regErrInternal(err) - } - defer tmp.Close() - var buf bytes.Buffer - io.Copy(&buf, tmp) - size = int64(buf.Len()) - r = &buf - } - - resp.Header().Set("Content-Length", fmt.Sprint(size)) - resp.Header().Set("Docker-Content-Digest", h.String()) - resp.WriteHeader(http.StatusOK) - io.Copy(resp, r) - return nil - - case http.MethodPost: - bph, ok := b.blobHandler.(BlobPutHandler) - if !ok { - return regErrUnsupported - } - - // It is weird that this is "target" instead of "service", but - // that's how the index math works out above. 
- if target != "uploads" { - return ®Error{ - Status: http.StatusBadRequest, - Code: "METHOD_UNKNOWN", - Message: fmt.Sprintf("POST to /blobs must be followed by /uploads, got %s", target), - } - } - - if digest != "" { - h, err := v1.NewHash(digest) - if err != nil { - return regErrDigestInvalid - } - - vrc, err := verify.ReadCloser(req.Body, req.ContentLength, h) - if err != nil { - return regErrInternal(err) - } - defer vrc.Close() - - if err = bph.Put(req.Context(), repo, h, vrc); err != nil { - if errors.As(err, &verify.Error{}) { - log.Printf("Digest mismatch: %v", err) - return regErrDigestMismatch - } - return regErrInternal(err) - } - resp.Header().Set("Docker-Content-Digest", h.String()) - resp.WriteHeader(http.StatusCreated) - return nil - } - - id := fmt.Sprint(rand.Int63()) - resp.Header().Set("Location", "/"+path.Join("v2", path.Join(elem[1:len(elem)-2]...), "blobs/uploads", id)) - resp.Header().Set("Range", "0-0") - resp.WriteHeader(http.StatusAccepted) - return nil - - case http.MethodPatch: - if service != "uploads" { - return ®Error{ - Status: http.StatusBadRequest, - Code: "METHOD_UNKNOWN", - Message: fmt.Sprintf("PATCH to /blobs must be followed by /uploads, got %s", service), - } - } - - if contentRange != "" { - start, end := 0, 0 - if _, err := fmt.Sscanf(contentRange, "%d-%d", &start, &end); err != nil { - return ®Error{ - Status: http.StatusRequestedRangeNotSatisfiable, - Code: "BLOB_UPLOAD_UNKNOWN", - Message: "We don't understand your Content-Range", - } - } - b.lock.Lock() - defer b.lock.Unlock() - if start != len(b.uploads[target]) { - return ®Error{ - Status: http.StatusRequestedRangeNotSatisfiable, - Code: "BLOB_UPLOAD_UNKNOWN", - Message: "Your content range doesn't match what we have", - } - } - l := bytes.NewBuffer(b.uploads[target]) - io.Copy(l, req.Body) - b.uploads[target] = l.Bytes() - resp.Header().Set("Location", "/"+path.Join("v2", path.Join(elem[1:len(elem)-3]...), "blobs/uploads", target)) - resp.Header().Set("Range", fmt.Sprintf("0-%d", len(l.Bytes())-1)) - resp.WriteHeader(http.StatusNoContent) - return nil - } - - b.lock.Lock() - defer b.lock.Unlock() - if _, ok := b.uploads[target]; ok { - return ®Error{ - Status: http.StatusBadRequest, - Code: "BLOB_UPLOAD_INVALID", - Message: "Stream uploads after first write are not allowed", - } - } - - l := &bytes.Buffer{} - io.Copy(l, req.Body) - - b.uploads[target] = l.Bytes() - resp.Header().Set("Location", "/"+path.Join("v2", path.Join(elem[1:len(elem)-3]...), "blobs/uploads", target)) - resp.Header().Set("Range", fmt.Sprintf("0-%d", len(l.Bytes())-1)) - resp.WriteHeader(http.StatusNoContent) - return nil - - case http.MethodPut: - bph, ok := b.blobHandler.(BlobPutHandler) - if !ok { - return regErrUnsupported - } - - if service != "uploads" { - return ®Error{ - Status: http.StatusBadRequest, - Code: "METHOD_UNKNOWN", - Message: fmt.Sprintf("PUT to /blobs must be followed by /uploads, got %s", service), - } - } - - if digest == "" { - return ®Error{ - Status: http.StatusBadRequest, - Code: "DIGEST_INVALID", - Message: "digest not specified", - } - } - - b.lock.Lock() - defer b.lock.Unlock() - - h, err := v1.NewHash(digest) - if err != nil { - return ®Error{ - Status: http.StatusBadRequest, - Code: "NAME_INVALID", - Message: "invalid digest", - } - } - - defer req.Body.Close() - in := io.NopCloser(io.MultiReader(bytes.NewBuffer(b.uploads[target]), req.Body)) - - size := int64(verify.SizeUnknown) - if req.ContentLength > 0 { - size = int64(len(b.uploads[target])) + req.ContentLength - } - - vrc, err := 
verify.ReadCloser(in, size, h) - if err != nil { - return regErrInternal(err) - } - defer vrc.Close() - - if err := bph.Put(req.Context(), repo, h, vrc); err != nil { - if errors.As(err, &verify.Error{}) { - log.Printf("Digest mismatch: %v", err) - return regErrDigestMismatch - } - return regErrInternal(err) - } - - delete(b.uploads, target) - resp.Header().Set("Docker-Content-Digest", h.String()) - resp.WriteHeader(http.StatusCreated) - return nil - - case http.MethodDelete: - bdh, ok := b.blobHandler.(BlobDeleteHandler) - if !ok { - return regErrUnsupported - } - - h, err := v1.NewHash(target) - if err != nil { - return ®Error{ - Status: http.StatusBadRequest, - Code: "NAME_INVALID", - Message: "invalid digest", - } - } - if err := bdh.Delete(req.Context(), repo, h); err != nil { - return regErrInternal(err) - } - resp.WriteHeader(http.StatusAccepted) - return nil - - default: - return ®Error{ - Status: http.StatusBadRequest, - Code: "METHOD_UNKNOWN", - Message: "We don't understand your method + url", - } - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/registry/blobs_disk.go b/vendor/github.com/google/go-containerregistry/pkg/registry/blobs_disk.go deleted file mode 100644 index 361390f049..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/registry/blobs_disk.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2023 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package registry - -import ( - "context" - "errors" - "io" - "os" - "path/filepath" - - v1 "github.com/google/go-containerregistry/pkg/v1" -) - -type diskHandler struct { - dir string -} - -func NewDiskBlobHandler(dir string) BlobHandler { return &diskHandler{dir: dir} } - -func (m *diskHandler) blobHashPath(h v1.Hash) string { - return filepath.Join(m.dir, h.Algorithm, h.Hex) -} - -func (m *diskHandler) Stat(_ context.Context, _ string, h v1.Hash) (int64, error) { - fi, err := os.Stat(m.blobHashPath(h)) - if errors.Is(err, os.ErrNotExist) { - return 0, errNotFound - } else if err != nil { - return 0, err - } - return fi.Size(), nil -} -func (m *diskHandler) Get(_ context.Context, _ string, h v1.Hash) (io.ReadCloser, error) { - return os.Open(m.blobHashPath(h)) -} -func (m *diskHandler) Put(_ context.Context, _ string, h v1.Hash, rc io.ReadCloser) error { - // Put the temp file in the same directory to avoid cross-device problems - // during the os.Rename. The filenames cannot conflict. 
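The POST branch of the handler above accepts a digest query parameter, which lets a client push a blob in a single request instead of the POST/PATCH/PUT upload session. A hedged client-side sketch against the in-memory registry; the repository name test/app is an arbitrary example:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/google/go-containerregistry/pkg/registry"
)

func main() {
	s := httptest.NewServer(registry.New())
	defer s.Close()

	data := []byte("example blob contents")
	digest := fmt.Sprintf("sha256:%x", sha256.Sum256(data))

	// POST with ?digest= performs a "monolithic" upload: the body is
	// verified against the digest and stored in one round trip.
	url := fmt.Sprintf("%s/v2/test/app/blobs/uploads/?digest=%s", s.URL, digest)
	resp, err := http.Post(url, "application/octet-stream", bytes.NewReader(data))
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.StatusCode, resp.Header.Get("Docker-Content-Digest")) // 201 sha256:...
}
```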
- f, err := os.CreateTemp(m.dir, "upload-*") - if err != nil { - return err - } - - if err := func() error { - defer f.Close() - _, err := io.Copy(f, rc) - return err - }(); err != nil { - return err - } - if err := os.MkdirAll(filepath.Join(m.dir, h.Algorithm), os.ModePerm); err != nil { - return err - } - return os.Rename(f.Name(), m.blobHashPath(h)) -} -func (m *diskHandler) Delete(_ context.Context, _ string, h v1.Hash) error { - return os.Remove(m.blobHashPath(h)) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/registry/error.go b/vendor/github.com/google/go-containerregistry/pkg/registry/error.go deleted file mode 100644 index f8e126dace..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/registry/error.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package registry - -import ( - "encoding/json" - "net/http" -) - -type regError struct { - Status int - Code string - Message string -} - -func (r *regError) Write(resp http.ResponseWriter) error { - resp.WriteHeader(r.Status) - - type err struct { - Code string `json:"code"` - Message string `json:"message"` - } - type wrap struct { - Errors []err `json:"errors"` - } - return json.NewEncoder(resp).Encode(wrap{ - Errors: []err{ - { - Code: r.Code, - Message: r.Message, - }, - }, - }) -} - -// regErrInternal returns an internal server error. -func regErrInternal(err error) *regError { - return ®Error{ - Status: http.StatusInternalServerError, - Code: "INTERNAL_SERVER_ERROR", - Message: err.Error(), - } -} - -var regErrBlobUnknown = ®Error{ - Status: http.StatusNotFound, - Code: "BLOB_UNKNOWN", - Message: "Unknown blob", -} - -var regErrUnsupported = ®Error{ - Status: http.StatusMethodNotAllowed, - Code: "UNSUPPORTED", - Message: "Unsupported operation", -} - -var regErrDigestMismatch = ®Error{ - Status: http.StatusBadRequest, - Code: "DIGEST_INVALID", - Message: "digest does not match contents", -} - -var regErrDigestInvalid = ®Error{ - Status: http.StatusBadRequest, - Code: "NAME_INVALID", - Message: "invalid digest", -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/registry/manifest.go b/vendor/github.com/google/go-containerregistry/pkg/registry/manifest.go deleted file mode 100644 index db8a8dc690..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/registry/manifest.go +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
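The regError plumbing deleted above wraps every failure in the distribution-spec JSON envelope. A sketch that provokes one, assuming the upstream module path; the all-zeros digest is a dummy value:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"

	"github.com/google/go-containerregistry/pkg/registry"
)

func main() {
	s := httptest.NewServer(registry.New())
	defer s.Close()

	// Fetch a blob that was never uploaded.
	resp, err := http.Get(s.URL + "/v2/test/app/blobs/sha256:" + strings.Repeat("0", 64))
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))
	// 404 {"errors":[{"code":"BLOB_UNKNOWN","message":"Unknown blob"}]}
}
```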
-// See the License for the specific language governing permissions and -// limitations under the License. - -package registry - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "log" - "net/http" - "sort" - "strconv" - "strings" - "sync" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -type catalog struct { - Repos []string `json:"repositories"` -} - -type listTags struct { - Name string `json:"name"` - Tags []string `json:"tags"` -} - -type manifest struct { - contentType string - blob []byte -} - -type manifests struct { - // maps repo -> manifest tag/digest -> manifest - manifests map[string]map[string]manifest - lock sync.RWMutex - log *log.Logger -} - -func isManifest(req *http.Request) bool { - elems := strings.Split(req.URL.Path, "/") - elems = elems[1:] - if len(elems) < 4 { - return false - } - return elems[len(elems)-2] == "manifests" -} - -func isTags(req *http.Request) bool { - elems := strings.Split(req.URL.Path, "/") - elems = elems[1:] - if len(elems) < 4 { - return false - } - return elems[len(elems)-2] == "tags" -} - -func isCatalog(req *http.Request) bool { - elems := strings.Split(req.URL.Path, "/") - elems = elems[1:] - if len(elems) < 2 { - return false - } - - return elems[len(elems)-1] == "_catalog" -} - -// Returns whether this url should be handled by the referrers handler -func isReferrers(req *http.Request) bool { - elems := strings.Split(req.URL.Path, "/") - elems = elems[1:] - if len(elems) < 4 { - return false - } - return elems[len(elems)-2] == "referrers" -} - -// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pulling-an-image-manifest -// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pushing-an-image -func (m *manifests) handle(resp http.ResponseWriter, req *http.Request) *regError { - elem := strings.Split(req.URL.Path, "/") - elem = elem[1:] - target := elem[len(elem)-1] - repo := strings.Join(elem[1:len(elem)-2], "/") - - switch req.Method { - case http.MethodGet: - m.lock.RLock() - defer m.lock.RUnlock() - - c, ok := m.manifests[repo] - if !ok { - return ®Error{ - Status: http.StatusNotFound, - Code: "NAME_UNKNOWN", - Message: "Unknown name", - } - } - m, ok := c[target] - if !ok { - return ®Error{ - Status: http.StatusNotFound, - Code: "MANIFEST_UNKNOWN", - Message: "Unknown manifest", - } - } - - h, _, _ := v1.SHA256(bytes.NewReader(m.blob)) - resp.Header().Set("Docker-Content-Digest", h.String()) - resp.Header().Set("Content-Type", m.contentType) - resp.Header().Set("Content-Length", fmt.Sprint(len(m.blob))) - resp.WriteHeader(http.StatusOK) - io.Copy(resp, bytes.NewReader(m.blob)) - return nil - - case http.MethodHead: - m.lock.RLock() - defer m.lock.RUnlock() - - if _, ok := m.manifests[repo]; !ok { - return ®Error{ - Status: http.StatusNotFound, - Code: "NAME_UNKNOWN", - Message: "Unknown name", - } - } - m, ok := m.manifests[repo][target] - if !ok { - return ®Error{ - Status: http.StatusNotFound, - Code: "MANIFEST_UNKNOWN", - Message: "Unknown manifest", - } - } - - h, _, _ := v1.SHA256(bytes.NewReader(m.blob)) - resp.Header().Set("Docker-Content-Digest", h.String()) - resp.Header().Set("Content-Type", m.contentType) - resp.Header().Set("Content-Length", fmt.Sprint(len(m.blob))) - resp.WriteHeader(http.StatusOK) - return nil - - case http.MethodPut: - b := &bytes.Buffer{} - io.Copy(b, req.Body) - h, _, _ := v1.SHA256(bytes.NewReader(b.Bytes())) - digest := h.String() - mf := manifest{ - 
blob: b.Bytes(), - contentType: req.Header.Get("Content-Type"), - } - - // If the manifest is a manifest list, check that the manifest - // list's constituent manifests are already uploaded. - // This isn't strictly required by the registry API, but some - // registries require this. - if types.MediaType(mf.contentType).IsIndex() { - if err := func() *regError { - m.lock.RLock() - defer m.lock.RUnlock() - - im, err := v1.ParseIndexManifest(b) - if err != nil { - return ®Error{ - Status: http.StatusBadRequest, - Code: "MANIFEST_INVALID", - Message: err.Error(), - } - } - for _, desc := range im.Manifests { - if !desc.MediaType.IsDistributable() { - continue - } - if desc.MediaType.IsIndex() || desc.MediaType.IsImage() { - if _, found := m.manifests[repo][desc.Digest.String()]; !found { - return ®Error{ - Status: http.StatusNotFound, - Code: "MANIFEST_UNKNOWN", - Message: fmt.Sprintf("Sub-manifest %q not found", desc.Digest), - } - } - } else { - // TODO: Probably want to do an existence check for blobs. - m.log.Printf("TODO: Check blobs for %q", desc.Digest) - } - } - return nil - }(); err != nil { - return err - } - } - - m.lock.Lock() - defer m.lock.Unlock() - - if _, ok := m.manifests[repo]; !ok { - m.manifests[repo] = make(map[string]manifest, 2) - } - - // Allow future references by target (tag) and immutable digest. - // See https://docs.docker.com/engine/reference/commandline/pull/#pull-an-image-by-digest-immutable-identifier. - m.manifests[repo][digest] = mf - m.manifests[repo][target] = mf - resp.Header().Set("Docker-Content-Digest", digest) - resp.WriteHeader(http.StatusCreated) - return nil - - case http.MethodDelete: - m.lock.Lock() - defer m.lock.Unlock() - if _, ok := m.manifests[repo]; !ok { - return ®Error{ - Status: http.StatusNotFound, - Code: "NAME_UNKNOWN", - Message: "Unknown name", - } - } - - _, ok := m.manifests[repo][target] - if !ok { - return ®Error{ - Status: http.StatusNotFound, - Code: "MANIFEST_UNKNOWN", - Message: "Unknown manifest", - } - } - - delete(m.manifests[repo], target) - resp.WriteHeader(http.StatusAccepted) - return nil - - default: - return ®Error{ - Status: http.StatusBadRequest, - Code: "METHOD_UNKNOWN", - Message: "We don't understand your method + url", - } - } -} - -func (m *manifests) handleTags(resp http.ResponseWriter, req *http.Request) *regError { - elem := strings.Split(req.URL.Path, "/") - elem = elem[1:] - repo := strings.Join(elem[1:len(elem)-2], "/") - - if req.Method == "GET" { - m.lock.RLock() - defer m.lock.RUnlock() - - c, ok := m.manifests[repo] - if !ok { - return ®Error{ - Status: http.StatusNotFound, - Code: "NAME_UNKNOWN", - Message: "Unknown name", - } - } - - var tags []string - for tag := range c { - if !strings.Contains(tag, "sha256:") { - tags = append(tags, tag) - } - } - sort.Strings(tags) - - // https://github.com/opencontainers/distribution-spec/blob/b505e9cc53ec499edbd9c1be32298388921bb705/detail.md#tags-paginated - // Offset using last query parameter. - if last := req.URL.Query().Get("last"); last != "" { - for i, t := range tags { - if t > last { - tags = tags[i:] - break - } - } - } - - // Limit using n query parameter. 
- if ns := req.URL.Query().Get("n"); ns != "" { - if n, err := strconv.Atoi(ns); err != nil { - return ®Error{ - Status: http.StatusBadRequest, - Code: "BAD_REQUEST", - Message: fmt.Sprintf("parsing n: %v", err), - } - } else if n < len(tags) { - tags = tags[:n] - } - } - - tagsToList := listTags{ - Name: repo, - Tags: tags, - } - - msg, _ := json.Marshal(tagsToList) - resp.Header().Set("Content-Length", fmt.Sprint(len(msg))) - resp.WriteHeader(http.StatusOK) - io.Copy(resp, bytes.NewReader([]byte(msg))) - return nil - } - - return ®Error{ - Status: http.StatusBadRequest, - Code: "METHOD_UNKNOWN", - Message: "We don't understand your method + url", - } -} - -func (m *manifests) handleCatalog(resp http.ResponseWriter, req *http.Request) *regError { - query := req.URL.Query() - nStr := query.Get("n") - n := 10000 - if nStr != "" { - n, _ = strconv.Atoi(nStr) - } - - if req.Method == "GET" { - m.lock.RLock() - defer m.lock.RUnlock() - - var repos []string - countRepos := 0 - // TODO: implement pagination - for key := range m.manifests { - if countRepos >= n { - break - } - countRepos++ - - repos = append(repos, key) - } - - repositoriesToList := catalog{ - Repos: repos, - } - - msg, _ := json.Marshal(repositoriesToList) - resp.Header().Set("Content-Length", fmt.Sprint(len(msg))) - resp.WriteHeader(http.StatusOK) - io.Copy(resp, bytes.NewReader([]byte(msg))) - return nil - } - - return ®Error{ - Status: http.StatusBadRequest, - Code: "METHOD_UNKNOWN", - Message: "We don't understand your method + url", - } -} - -// TODO: implement handling of artifactType querystring -func (m *manifests) handleReferrers(resp http.ResponseWriter, req *http.Request) *regError { - // Ensure this is a GET request - if req.Method != "GET" { - return ®Error{ - Status: http.StatusBadRequest, - Code: "METHOD_UNKNOWN", - Message: "We don't understand your method + url", - } - } - - elem := strings.Split(req.URL.Path, "/") - elem = elem[1:] - target := elem[len(elem)-1] - repo := strings.Join(elem[1:len(elem)-2], "/") - - // Validate that incoming target is a valid digest - if _, err := v1.NewHash(target); err != nil { - return ®Error{ - Status: http.StatusBadRequest, - Code: "UNSUPPORTED", - Message: "Target must be a valid digest", - } - } - - m.lock.RLock() - defer m.lock.RUnlock() - - digestToManifestMap, repoExists := m.manifests[repo] - if !repoExists { - return ®Error{ - Status: http.StatusNotFound, - Code: "NAME_UNKNOWN", - Message: "Unknown name", - } - } - - im := v1.IndexManifest{ - SchemaVersion: 2, - MediaType: types.OCIImageIndex, - Manifests: []v1.Descriptor{}, - } - for digest, manifest := range digestToManifestMap { - h, err := v1.NewHash(digest) - if err != nil { - continue - } - var refPointer struct { - Subject *v1.Descriptor `json:"subject"` - } - json.Unmarshal(manifest.blob, &refPointer) - if refPointer.Subject == nil { - continue - } - referenceDigest := refPointer.Subject.Digest - if referenceDigest.String() != target { - continue - } - // At this point, we know the current digest references the target - var imageAsArtifact struct { - Config struct { - MediaType string `json:"mediaType"` - } `json:"config"` - } - json.Unmarshal(manifest.blob, &imageAsArtifact) - im.Manifests = append(im.Manifests, v1.Descriptor{ - MediaType: types.MediaType(manifest.contentType), - Size: int64(len(manifest.blob)), - Digest: h, - ArtifactType: imageAsArtifact.Config.MediaType, - }) - } - msg, _ := json.Marshal(&im) - resp.Header().Set("Content-Length", fmt.Sprint(len(msg))) - resp.Header().Set("Content-Type", 
string(types.OCIImageIndex)) - resp.WriteHeader(http.StatusOK) - io.Copy(resp, bytes.NewReader([]byte(msg))) - return nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/registry/registry.go b/vendor/github.com/google/go-containerregistry/pkg/registry/registry.go deleted file mode 100644 index 2f8fd1127b..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/registry/registry.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package registry implements a docker V2 registry and the OCI distribution specification. -// -// It is designed to be used anywhere a low dependency container registry is needed, with an -// initial focus on tests. -// -// Its goal is to be standards compliant and its strictness will increase over time. -// -// This is currently a low flightmiles system. It's likely quite safe to use in tests; If you're using it -// in production, please let us know how and send us CL's for integration tests. -package registry - -import ( - "fmt" - "log" - "math/rand" - "net/http" - "os" -) - -type registry struct { - log *log.Logger - blobs blobs - manifests manifests - referrersEnabled bool - warnings map[float64]string -} - -// https://docs.docker.com/registry/spec/api/#api-version-check -// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#api-version-check -func (r *registry) v2(resp http.ResponseWriter, req *http.Request) *regError { - if r.warnings != nil { - rnd := rand.Float64() - for prob, msg := range r.warnings { - if prob > rnd { - resp.Header().Add("Warning", fmt.Sprintf(`299 - "%s"`, msg)) - } - } - } - - if isBlob(req) { - return r.blobs.handle(resp, req) - } - if isManifest(req) { - return r.manifests.handle(resp, req) - } - if isTags(req) { - return r.manifests.handleTags(resp, req) - } - if isCatalog(req) { - return r.manifests.handleCatalog(resp, req) - } - if r.referrersEnabled && isReferrers(req) { - return r.manifests.handleReferrers(resp, req) - } - resp.Header().Set("Docker-Distribution-API-Version", "registry/2.0") - if req.URL.Path != "/v2/" && req.URL.Path != "/v2" { - return ®Error{ - Status: http.StatusNotFound, - Code: "METHOD_UNKNOWN", - Message: "We don't understand your method + url", - } - } - resp.WriteHeader(200) - return nil -} - -func (r *registry) root(resp http.ResponseWriter, req *http.Request) { - if rerr := r.v2(resp, req); rerr != nil { - r.log.Printf("%s %s %d %s %s", req.Method, req.URL, rerr.Status, rerr.Code, rerr.Message) - rerr.Write(resp) - return - } - r.log.Printf("%s %s", req.Method, req.URL) -} - -// New returns a handler which implements the docker registry protocol. -// It should be registered at the site root. 
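Referrers support is opt-in via the option that appears further below. A sketch of querying the endpoint on an empty registry, assuming the upstream module path; the repository and digest are dummy values:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"

	"github.com/google/go-containerregistry/pkg/registry"
)

func main() {
	s := httptest.NewServer(registry.New(registry.WithReferrersSupport(true)))
	defer s.Close()

	// With nothing pushed yet this 404s with NAME_UNKNOWN; once manifests
	// whose "subject" points at this digest exist, the handler returns an
	// OCI image index of their descriptors instead.
	resp, err := http.Get(s.URL + "/v2/test/app/referrers/sha256:" + strings.Repeat("a", 64))
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.StatusCode) // 404
}
```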
-func New(opts ...Option) http.Handler { - r := ®istry{ - log: log.New(os.Stderr, "", log.LstdFlags), - blobs: blobs{ - blobHandler: &memHandler{m: map[string][]byte{}}, - uploads: map[string][]byte{}, - log: log.New(os.Stderr, "", log.LstdFlags), - }, - manifests: manifests{ - manifests: map[string]map[string]manifest{}, - log: log.New(os.Stderr, "", log.LstdFlags), - }, - } - for _, o := range opts { - o(r) - } - return http.HandlerFunc(r.root) -} - -// Option describes the available options -// for creating the registry. -type Option func(r *registry) - -// Logger overrides the logger used to record requests to the registry. -func Logger(l *log.Logger) Option { - return func(r *registry) { - r.log = l - r.manifests.log = l - r.blobs.log = l - } -} - -// WithReferrersSupport enables the referrers API endpoint (OCI 1.1+) -func WithReferrersSupport(enabled bool) Option { - return func(r *registry) { - r.referrersEnabled = enabled - } -} - -func WithWarning(prob float64, msg string) Option { - return func(r *registry) { - if r.warnings == nil { - r.warnings = map[float64]string{} - } - r.warnings[prob] = msg - } -} - -func WithBlobHandler(h BlobHandler) Option { - return func(r *registry) { - r.blobs.blobHandler = h - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/registry/tls.go b/vendor/github.com/google/go-containerregistry/pkg/registry/tls.go deleted file mode 100644 index cb2644e615..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/registry/tls.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package registry - -import ( - "net/http/httptest" - - ggcrtest "github.com/google/go-containerregistry/internal/httptest" -) - -// TLS returns an httptest server, with an http client that has been configured to -// send all requests to the returned server. The TLS certs are generated for the given domain -// which should correspond to the domain the image is stored in. -// If you need a transport, Client().Transport is correctly configured. -func TLS(domain string) (*httptest.Server, error) { - return ggcrtest.NewTLSServer(domain, New()) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/cache/cache.go b/vendor/github.com/google/go-containerregistry/pkg/v1/cache/cache.go deleted file mode 100644 index 31d9c935c1..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/cache/cache.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2021 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
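The options deleted above compose at construction time. A sketch wiring several of them together, assuming the upstream module path; the port and the temp-dir blob store are arbitrary example choices:

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/google/go-containerregistry/pkg/registry"
)

func main() {
	handler := registry.New(
		registry.Logger(log.New(os.Stdout, "registry ", log.LstdFlags)),
		registry.WithReferrersSupport(true),
		// Attach a Warning header to roughly half of all responses.
		registry.WithWarning(0.5, "this registry is for testing only"),
		// Persist blobs on disk instead of the default in-memory map.
		registry.WithBlobHandler(registry.NewDiskBlobHandler(os.TempDir())),
	)
	log.Fatal(http.ListenAndServe(":1338", handler))
}
```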
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cache provides methods to cache layers. -package cache - -import ( - "errors" - "io" - - "github.com/google/go-containerregistry/pkg/logs" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Cache encapsulates methods to interact with cached layers. -type Cache interface { - // Put writes the Layer to the Cache. - // - // The returned Layer should be used for future operations, since lazy - // cachers might only populate the cache when the layer is actually - // consumed. - // - // The returned layer can be consumed, and the cache entry populated, - // by calling either Compressed or Uncompressed and consuming the - // returned io.ReadCloser. - Put(v1.Layer) (v1.Layer, error) - - // Get returns the Layer cached by the given Hash, or ErrNotFound if no - // such layer was found. - Get(v1.Hash) (v1.Layer, error) - - // Delete removes the Layer with the given Hash from the Cache. - Delete(v1.Hash) error -} - -// ErrNotFound is returned by Get when no layer with the given Hash is found. -var ErrNotFound = errors.New("layer was not found") - -// Image returns a new Image which wraps the given Image, whose layers will be -// pulled from the Cache if they are found, and written to the Cache as they -// are read from the underlying Image. -func Image(i v1.Image, c Cache) v1.Image { - return &image{ - Image: i, - c: c, - } -} - -type image struct { - v1.Image - c Cache -} - -func (i *image) Layers() ([]v1.Layer, error) { - ls, err := i.Image.Layers() - if err != nil { - return nil, err - } - - out := make([]v1.Layer, len(ls)) - for idx, l := range ls { - out[idx] = &lazyLayer{inner: l, c: i.c} - } - return out, nil -} - -type lazyLayer struct { - inner v1.Layer - c Cache -} - -func (l *lazyLayer) Compressed() (io.ReadCloser, error) { - digest, err := l.inner.Digest() - if err != nil { - return nil, err - } - - if cl, err := l.c.Get(digest); err == nil { - // Layer found in the cache. - logs.Progress.Printf("Layer %s found (compressed) in cache", digest) - return cl.Compressed() - } else if !errors.Is(err, ErrNotFound) { - return nil, err - } - - // Not cached, pull and return the real layer. - logs.Progress.Printf("Layer %s not found (compressed) in cache, getting", digest) - rl, err := l.c.Put(l.inner) - if err != nil { - return nil, err - } - return rl.Compressed() -} - -func (l *lazyLayer) Uncompressed() (io.ReadCloser, error) { - diffID, err := l.inner.DiffID() - if err != nil { - return nil, err - } - if cl, err := l.c.Get(diffID); err == nil { - // Layer found in the cache. - logs.Progress.Printf("Layer %s found (uncompressed) in cache", diffID) - return cl.Uncompressed() - } else if !errors.Is(err, ErrNotFound) { - return nil, err - } - - // Not cached, pull and return the real layer. 
- logs.Progress.Printf("Layer %s not found (uncompressed) in cache, getting", diffID) - rl, err := l.c.Put(l.inner) - if err != nil { - return nil, err - } - return rl.Uncompressed() -} - -func (l *lazyLayer) Size() (int64, error) { return l.inner.Size() } -func (l *lazyLayer) DiffID() (v1.Hash, error) { return l.inner.DiffID() } -func (l *lazyLayer) Digest() (v1.Hash, error) { return l.inner.Digest() } -func (l *lazyLayer) MediaType() (types.MediaType, error) { return l.inner.MediaType() } - -func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) { - l, err := i.c.Get(h) - if errors.Is(err, ErrNotFound) { - // Not cached, get it and write it. - l, err := i.Image.LayerByDigest(h) - if err != nil { - return nil, err - } - return i.c.Put(l) - } - return l, err -} - -func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { - l, err := i.c.Get(h) - if errors.Is(err, ErrNotFound) { - // Not cached, get it and write it. - l, err := i.Image.LayerByDiffID(h) - if err != nil { - return nil, err - } - return i.c.Put(l) - } - return l, err -} - -// ImageIndex returns a new ImageIndex which wraps the given ImageIndex's -// children with either Image(child, c) or ImageIndex(child, c) depending on type. -func ImageIndex(ii v1.ImageIndex, c Cache) v1.ImageIndex { - return &imageIndex{ - inner: ii, - c: c, - } -} - -type imageIndex struct { - inner v1.ImageIndex - c Cache -} - -func (ii *imageIndex) MediaType() (types.MediaType, error) { return ii.inner.MediaType() } -func (ii *imageIndex) Digest() (v1.Hash, error) { return ii.inner.Digest() } -func (ii *imageIndex) Size() (int64, error) { return ii.inner.Size() } -func (ii *imageIndex) IndexManifest() (*v1.IndexManifest, error) { return ii.inner.IndexManifest() } -func (ii *imageIndex) RawManifest() ([]byte, error) { return ii.inner.RawManifest() } - -func (ii *imageIndex) Image(h v1.Hash) (v1.Image, error) { - i, err := ii.inner.Image(h) - if err != nil { - return nil, err - } - return Image(i, ii.c), nil -} - -func (ii *imageIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { - idx, err := ii.inner.ImageIndex(h) - if err != nil { - return nil, err - } - return ImageIndex(idx, ii.c), nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/cache/fs.go b/vendor/github.com/google/go-containerregistry/pkg/v1/cache/fs.go deleted file mode 100644 index 75b826e3e4..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/cache/fs.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2021 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cache - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/tarball" -) - -type fscache struct { - path string -} - -// NewFilesystemCache returns a Cache implementation backed by files. 
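A typical use of this package is to wrap a remote image so each layer is only fetched once. A sketch assuming the upstream module path and network access at runtime; /tmp/ggcr-cache is a placeholder directory:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/cache"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("ubuntu")
	if err != nil {
		panic(err)
	}
	img, err := remote.Image(ref) // fetches manifest metadata over the network
	if err != nil {
		panic(err)
	}

	// Layer reads are teed into the cache directory on first use and
	// served from disk afterwards.
	img = cache.Image(img, cache.NewFilesystemCache("/tmp/ggcr-cache"))

	layers, err := img.Layers()
	if err != nil {
		panic(err)
	}
	fmt.Println("layers:", len(layers))
}
```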
-func NewFilesystemCache(path string) Cache { - return &fscache{path} -} - -func (fs *fscache) Put(l v1.Layer) (v1.Layer, error) { - digest, err := l.Digest() - if err != nil { - return nil, err - } - diffID, err := l.DiffID() - if err != nil { - return nil, err - } - return &layer{ - Layer: l, - path: fs.path, - digest: digest, - diffID: diffID, - }, nil -} - -type layer struct { - v1.Layer - path string - digest, diffID v1.Hash -} - -func (l *layer) create(h v1.Hash) (io.WriteCloser, error) { - if err := os.MkdirAll(l.path, 0700); err != nil { - return nil, err - } - return os.Create(cachepath(l.path, h)) -} - -func (l *layer) Compressed() (io.ReadCloser, error) { - f, err := l.create(l.digest) - if err != nil { - return nil, err - } - rc, err := l.Layer.Compressed() - if err != nil { - return nil, err - } - return &readcloser{ - t: io.TeeReader(rc, f), - closes: []func() error{rc.Close, f.Close}, - }, nil -} - -func (l *layer) Uncompressed() (io.ReadCloser, error) { - f, err := l.create(l.diffID) - if err != nil { - return nil, err - } - rc, err := l.Layer.Uncompressed() - if err != nil { - return nil, err - } - return &readcloser{ - t: io.TeeReader(rc, f), - closes: []func() error{rc.Close, f.Close}, - }, nil -} - -type readcloser struct { - t io.Reader - closes []func() error -} - -func (rc *readcloser) Read(b []byte) (int, error) { - return rc.t.Read(b) -} - -func (rc *readcloser) Close() error { - // Call all Close methods, even if any returned an error. Return the - // first returned error. - var err error - for _, c := range rc.closes { - lastErr := c() - if err == nil { - err = lastErr - } - } - return err -} - -func (fs *fscache) Get(h v1.Hash) (v1.Layer, error) { - l, err := tarball.LayerFromFile(cachepath(fs.path, h)) - if os.IsNotExist(err) { - return nil, ErrNotFound - } - if errors.Is(err, io.ErrUnexpectedEOF) { - // Delete and return ErrNotFound because the layer was incomplete. - if err := fs.Delete(h); err != nil { - return nil, err - } - return nil, ErrNotFound - } - return l, err -} - -func (fs *fscache) Delete(h v1.Hash) error { - err := os.Remove(cachepath(fs.path, h)) - if os.IsNotExist(err) { - return ErrNotFound - } - return err -} - -func cachepath(path string, h v1.Hash) string { - var file string - if runtime.GOOS == "windows" { - file = fmt.Sprintf("%s-%s", h.Algorithm, h.Hex) - } else { - file = h.String() - } - return filepath.Join(path, file) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/cache/ro.go b/vendor/github.com/google/go-containerregistry/pkg/v1/cache/ro.go deleted file mode 100644 index 028a612461..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/cache/ro.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cache - -import v1 "github.com/google/go-containerregistry/pkg/v1" - -// ReadOnly returns a read-only implementation of the given Cache. 
-// -// Put and Delete operations are a no-op. -func ReadOnly(c Cache) Cache { return &ro{Cache: c} } - -type ro struct{ Cache } - -func (ro) Put(l v1.Layer) (v1.Layer, error) { return l, nil } -func (ro) Delete(v1.Hash) error { return nil } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go deleted file mode 100644 index 960c93b5f4..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "encoding/json" - "io" - "time" -) - -// ConfigFile is the configuration file that holds the metadata describing -// how to launch a container. See: -// https://github.com/opencontainers/image-spec/blob/master/config.md -// -// docker_version and os.version are not part of the spec but included -// for backwards compatibility. -type ConfigFile struct { - Architecture string `json:"architecture"` - Author string `json:"author,omitempty"` - Container string `json:"container,omitempty"` - Created Time `json:"created,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - History []History `json:"history,omitempty"` - OS string `json:"os"` - RootFS RootFS `json:"rootfs"` - Config Config `json:"config"` - OSVersion string `json:"os.version,omitempty"` - Variant string `json:"variant,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` -} - -// Platform attempts to generates a Platform from the ConfigFile fields. -func (cf *ConfigFile) Platform() *Platform { - if cf.OS == "" && cf.Architecture == "" && cf.OSVersion == "" && cf.Variant == "" && len(cf.OSFeatures) == 0 { - return nil - } - return &Platform{ - OS: cf.OS, - Architecture: cf.Architecture, - OSVersion: cf.OSVersion, - Variant: cf.Variant, - OSFeatures: cf.OSFeatures, - } -} - -// History is one entry of a list recording how this container image was built. -type History struct { - Author string `json:"author,omitempty"` - Created Time `json:"created,omitempty"` - CreatedBy string `json:"created_by,omitempty"` - Comment string `json:"comment,omitempty"` - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Time is a wrapper around time.Time to help with deep copying -type Time struct { - time.Time -} - -// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time -// type is effectively immutable in the time API, so it is safe to -// copy-by-assign, despite the presence of (unexported) Pointer fields. -func (t *Time) DeepCopyInto(out *Time) { - *out = *t -} - -// RootFS holds the ordered list of file system deltas that comprise the -// container image's root filesystem. -type RootFS struct { - Type string `json:"type"` - DiffIDs []Hash `json:"diff_ids"` -} - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. 
-type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Config is a submessage of the config file described as: -// -// The execution parameters which SHOULD be used as a base when running -// a container using the image. -// -// The names of the fields in this message are chosen to reflect the JSON -// payload of the Config as defined here: -// https://git.io/vrAET -// and -// https://github.com/opencontainers/image-spec/blob/master/config.md -type Config struct { - AttachStderr bool `json:"AttachStderr,omitempty"` - AttachStdin bool `json:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty"` - Cmd []string `json:"Cmd,omitempty"` - Healthcheck *HealthConfig `json:"Healthcheck,omitempty"` - Domainname string `json:"Domainname,omitempty"` - Entrypoint []string `json:"Entrypoint,omitempty"` - Env []string `json:"Env,omitempty"` - Hostname string `json:"Hostname,omitempty"` - Image string `json:"Image,omitempty"` - Labels map[string]string `json:"Labels,omitempty"` - OnBuild []string `json:"OnBuild,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty"` - StdinOnce bool `json:"StdinOnce,omitempty"` - Tty bool `json:"Tty,omitempty"` - User string `json:"User,omitempty"` - Volumes map[string]struct{} `json:"Volumes,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty"` - ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` - ArgsEscaped bool `json:"ArgsEscaped,omitempty"` - NetworkDisabled bool `json:"NetworkDisabled,omitempty"` - MacAddress string `json:"MacAddress,omitempty"` - StopSignal string `json:"StopSignal,omitempty"` - Shell []string `json:"Shell,omitempty"` -} - -// ParseConfigFile parses the io.Reader's contents into a ConfigFile. -func ParseConfigFile(r io.Reader) (*ConfigFile, error) { - cf := ConfigFile{} - if err := json.NewDecoder(r).Decode(&cf); err != nil { - return nil, err - } - return &cf, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/README.md deleted file mode 100644 index 74fc3a87c0..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# `daemon` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/daemon?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/daemon) - -The `daemon` package enables reading/writing images from/to the docker daemon. 
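A quick aside on the config.go removed above, before the daemon notes continue: ParseConfigFile is the usual entry point for decoding an image config, and Platform() condenses the platform fields. A self-contained sketch; the JSON literal is invented for illustration:

package main

import (
	"fmt"
	"log"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	// Invented, minimal config JSON for illustration.
	raw := `{"architecture":"amd64","os":"linux","rootfs":{"type":"layers","diff_ids":[]},"config":{"Env":["PATH=/usr/bin"]}}`
	cf, err := v1.ParseConfigFile(strings.NewReader(raw))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cf.Platform())    // non-nil, since os and architecture are set
	fmt.Println(cf.Config.Env[0]) // PATH=/usr/bin
}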
- -It is not fully fleshed out, but is useful for interoperability, see various issues: - -* https://github.com/google/go-containerregistry/issues/205 -* https://github.com/google/go-containerregistry/issues/552 -* https://github.com/google/go-containerregistry/issues/627 diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/doc.go deleted file mode 100644 index ac05d96121..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package daemon provides facilities for reading/writing v1.Image from/to -// a running daemon. -package daemon diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image.go deleted file mode 100644 index d2efcb372e..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package daemon - -import ( - "bytes" - "context" - "io" - "sync" - "time" - - api "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/tarball" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -type image struct { - ref name.Reference - opener *imageOpener - tarballImage v1.Image - computed bool - id *v1.Hash - configFile *v1.ConfigFile - - once sync.Once - err error -} - -type imageOpener struct { - ref name.Reference - ctx context.Context - - buffered bool - client Client - - once sync.Once - bytes []byte - err error -} - -func (i *imageOpener) saveImage() (io.ReadCloser, error) { - return i.client.ImageSave(i.ctx, []string{i.ref.Name()}) -} - -func (i *imageOpener) bufferedOpener() (io.ReadCloser, error) { - // Store the tarball in memory and return a new reader into the bytes each time we need to access something. 
- i.once.Do(func() { - i.bytes, i.err = func() ([]byte, error) { - rc, err := i.saveImage() - if err != nil { - return nil, err - } - defer rc.Close() - - return io.ReadAll(rc) - }() - }) - - // Wrap the bytes in a ReadCloser so it looks like an opened file. - return io.NopCloser(bytes.NewReader(i.bytes)), i.err -} - -func (i *imageOpener) opener() tarball.Opener { - if i.buffered { - return i.bufferedOpener - } - - // To avoid storing the tarball in memory, do a save every time we need to access something. - return i.saveImage -} - -// Image provides access to an image reference from the Docker daemon, -// applying functional options to the underlying imageOpener before -// resolving the reference into a v1.Image. -func Image(ref name.Reference, options ...Option) (v1.Image, error) { - o, err := makeOptions(options...) - if err != nil { - return nil, err - } - - i := &imageOpener{ - ref: ref, - buffered: o.buffered, - client: o.client, - ctx: o.ctx, - } - - img := &image{ - ref: ref, - opener: i, - } - - // Eagerly fetch Image ID to ensure it actually exists. - // https://github.com/google/go-containerregistry/issues/1186 - id, err := img.ConfigName() - if err != nil { - return nil, err - } - img.id = &id - - return img, nil -} - -func (i *image) initialize() error { - // Don't re-initialize tarball if already initialized. - if i.tarballImage == nil { - i.once.Do(func() { - i.tarballImage, i.err = tarball.Image(i.opener.opener(), nil) - }) - } - return i.err -} - -func (i *image) compute() error { - // Don't re-compute if already computed. - if i.computed { - return nil - } - - inspect, _, err := i.opener.client.ImageInspectWithRaw(i.opener.ctx, i.ref.String()) - if err != nil { - return err - } - - configFile, err := i.computeConfigFile(inspect) - if err != nil { - return err - } - - i.configFile = configFile - i.computed = true - - return nil -} - -func (i *image) Layers() ([]v1.Layer, error) { - if err := i.initialize(); err != nil { - return nil, err - } - return i.tarballImage.Layers() -} - -func (i *image) MediaType() (types.MediaType, error) { - if err := i.initialize(); err != nil { - return "", err - } - return i.tarballImage.MediaType() -} - -func (i *image) Size() (int64, error) { - if err := i.initialize(); err != nil { - return 0, err - } - return i.tarballImage.Size() -} - -func (i *image) ConfigName() (v1.Hash, error) { - if i.id != nil { - return *i.id, nil - } - res, _, err := i.opener.client.ImageInspectWithRaw(i.opener.ctx, i.ref.String()) - if err != nil { - return v1.Hash{}, err - } - return v1.NewHash(res.ID) -} - -func (i *image) ConfigFile() (*v1.ConfigFile, error) { - if err := i.compute(); err != nil { - return nil, err - } - return i.configFile.DeepCopy(), nil -} - -func (i *image) RawConfigFile() ([]byte, error) { - if err := i.initialize(); err != nil { - return nil, err - } - - // RawConfigFile cannot be generated from "docker inspect" because Docker Engine API returns serialized data, - // and formatting information of the raw config such as indent and prefix will be lost. 
- return i.tarballImage.RawConfigFile() -} - -func (i *image) Digest() (v1.Hash, error) { - if err := i.initialize(); err != nil { - return v1.Hash{}, err - } - return i.tarballImage.Digest() -} - -func (i *image) Manifest() (*v1.Manifest, error) { - if err := i.initialize(); err != nil { - return nil, err - } - return i.tarballImage.Manifest() -} - -func (i *image) RawManifest() ([]byte, error) { - if err := i.initialize(); err != nil { - return nil, err - } - return i.tarballImage.RawManifest() -} - -func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) { - if err := i.initialize(); err != nil { - return nil, err - } - return i.tarballImage.LayerByDigest(h) -} - -func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { - if err := i.initialize(); err != nil { - return nil, err - } - return i.tarballImage.LayerByDiffID(h) -} - -func (i *image) configHistory(author string) ([]v1.History, error) { - historyItems, err := i.opener.client.ImageHistory(i.opener.ctx, i.ref.String()) - if err != nil { - return nil, err - } - - history := make([]v1.History, len(historyItems)) - for j, h := range historyItems { - history[j] = v1.History{ - Author: author, - Created: v1.Time{ - Time: time.Unix(h.Created, 0).UTC(), - }, - CreatedBy: h.CreatedBy, - Comment: h.Comment, - EmptyLayer: h.Size == 0, - } - } - return history, nil -} - -func (i *image) diffIDs(rootFS api.RootFS) ([]v1.Hash, error) { - diffIDs := make([]v1.Hash, len(rootFS.Layers)) - for j, l := range rootFS.Layers { - h, err := v1.NewHash(l) - if err != nil { - return nil, err - } - diffIDs[j] = h - } - return diffIDs, nil -} - -func (i *image) computeConfigFile(inspect api.ImageInspect) (*v1.ConfigFile, error) { - diffIDs, err := i.diffIDs(inspect.RootFS) - if err != nil { - return nil, err - } - - history, err := i.configHistory(inspect.Author) - if err != nil { - return nil, err - } - - created, err := time.Parse(time.RFC3339Nano, inspect.Created) - if err != nil { - return nil, err - } - - return &v1.ConfigFile{ - Architecture: inspect.Architecture, - Author: inspect.Author, - Container: inspect.Container, - Created: v1.Time{Time: created}, - DockerVersion: inspect.DockerVersion, - History: history, - OS: inspect.Os, - RootFS: v1.RootFS{ - Type: inspect.RootFS.Type, - DiffIDs: diffIDs, - }, - Config: i.computeImageConfig(inspect.Config), - OSVersion: inspect.OsVersion, - }, nil -} - -func (i *image) computeImageConfig(config *container.Config) v1.Config { - if config == nil { - return v1.Config{} - } - - c := v1.Config{ - AttachStderr: config.AttachStderr, - AttachStdin: config.AttachStdin, - AttachStdout: config.AttachStdout, - Cmd: config.Cmd, - Domainname: config.Domainname, - Entrypoint: config.Entrypoint, - Env: config.Env, - Hostname: config.Hostname, - Image: config.Image, - Labels: config.Labels, - OnBuild: config.OnBuild, - OpenStdin: config.OpenStdin, - StdinOnce: config.StdinOnce, - Tty: config.Tty, - User: config.User, - Volumes: config.Volumes, - WorkingDir: config.WorkingDir, - ArgsEscaped: config.ArgsEscaped, - NetworkDisabled: config.NetworkDisabled, - MacAddress: config.MacAddress, - StopSignal: config.StopSignal, - Shell: config.Shell, - } - - if config.Healthcheck != nil { - c.Healthcheck = &v1.HealthConfig{ - Test: config.Healthcheck.Test, - Interval: config.Healthcheck.Interval, - Timeout: config.Healthcheck.Timeout, - StartPeriod: config.Healthcheck.StartPeriod, - Retries: config.Healthcheck.Retries, - } - } - - if len(config.ExposedPorts) > 0 { - c.ExposedPorts = map[string]struct{}{} - for port := range 
config.ExposedPorts { - c.ExposedPorts[port] = struct{}{} - } - } - - return c -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/options.go deleted file mode 100644 index b806463697..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/options.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package daemon - -import ( - "context" - "io" - - "github.com/docker/docker/api/types" - api "github.com/docker/docker/api/types/image" - "github.com/docker/docker/client" -) - -// ImageOption is an alias for Option. -// Deprecated: Use Option instead. -type ImageOption Option - -// Option is a functional option for daemon operations. -type Option func(*options) - -type options struct { - ctx context.Context - client Client - buffered bool -} - -var defaultClient = func() (Client, error) { - return client.NewClientWithOpts(client.FromEnv) -} - -func makeOptions(opts ...Option) (*options, error) { - o := &options{ - buffered: true, - ctx: context.Background(), - } - for _, opt := range opts { - opt(o) - } - - if o.client == nil { - client, err := defaultClient() - if err != nil { - return nil, err - } - o.client = client - } - o.client.NegotiateAPIVersion(o.ctx) - - return o, nil -} - -// WithBufferedOpener buffers the image. -func WithBufferedOpener() Option { - return func(o *options) { - o.buffered = true - } -} - -// WithUnbufferedOpener streams the image to avoid buffering. -func WithUnbufferedOpener() Option { - return func(o *options) { - o.buffered = false - } -} - -// WithClient is a functional option to allow injecting a docker client. -// -// By default, github.com/docker/docker/client.FromEnv is used. -func WithClient(client Client) Option { - return func(o *options) { - o.client = client - } -} - -// WithContext is a functional option to pass through a context.Context. -// -// By default, context.Background() is used. -func WithContext(ctx context.Context) Option { - return func(o *options) { - o.ctx = ctx - } -} - -// Client represents the subset of a docker client that the daemon -// package uses.
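For context on the API being dropped above: daemon.Image resolves a reference via the local Docker daemon, and the functional options from options.go control buffering and the injected client. A hedged sketch of typical use (image name invented; requires a running daemon); the Client interface it depends on follows:

package main

import (
	"context"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/daemon"
)

func main() {
	ref, err := name.ParseReference("example/app:latest") // invented name
	if err != nil {
		log.Fatal(err)
	}
	// Unbuffered: re-invokes "docker save" per access instead of holding the
	// whole tarball in memory; see opener() above. Fails if the local daemon
	// does not have the image.
	img, err := daemon.Image(ref,
		daemon.WithContext(context.Background()),
		daemon.WithUnbufferedOpener(),
	)
	if err != nil {
		log.Fatal(err)
	}
	digest, err := img.Digest()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("digest: %s", digest)
}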
-type Client interface { - NegotiateAPIVersion(ctx context.Context) - ImageSave(context.Context, []string) (io.ReadCloser, error) - ImageLoad(context.Context, io.Reader, bool) (types.ImageLoadResponse, error) - ImageTag(context.Context, string, string) error - ImageInspectWithRaw(context.Context, string) (types.ImageInspect, []byte, error) - ImageHistory(context.Context, string) ([]api.HistoryResponseItem, error) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/write.go deleted file mode 100644 index 3ca5b52dd2..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/write.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package daemon - -import ( - "fmt" - "io" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/tarball" -) - -// Tag adds a tag to an already existent image. -func Tag(src, dest name.Tag, options ...Option) error { - o, err := makeOptions(options...) - if err != nil { - return err - } - - return o.client.ImageTag(o.ctx, src.String(), dest.String()) -} - -// Write saves the image into the daemon as the given tag. -func Write(tag name.Tag, img v1.Image, options ...Option) (string, error) { - o, err := makeOptions(options...) - if err != nil { - return "", err - } - - // If we already have this image by this image ID, we can skip loading it. - id, err := img.ConfigName() - if err != nil { - return "", fmt.Errorf("computing image ID: %w", err) - } - if resp, _, err := o.client.ImageInspectWithRaw(o.ctx, id.String()); err == nil { - want := tag.String() - - // If we already have this tag, we can skip tagging it. - for _, have := range resp.RepoTags { - if have == want { - return "", nil - } - } - - return "", o.client.ImageTag(o.ctx, id.String(), want) - } - - pr, pw := io.Pipe() - go func() { - pw.CloseWithError(tarball.Write(tag, img, pw)) - }() - - // write the image in docker save format first, then load it - resp, err := o.client.ImageLoad(o.ctx, pr, false) - if err != nil { - return "", fmt.Errorf("error loading image: %w", err) - } - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - response := string(b) - if err != nil { - return response, fmt.Errorf("error reading load response body: %w", err) - } - return response, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go deleted file mode 100644 index 7a84736be2..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. 
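One more daemon sketch before v1/doc.go, whose license header begins above: Write, from the write.go just removed, loads an image into the daemon and skips the load when the daemon already has that image ID. The package name here is invented:

package daemonutil

import (
	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/daemon"
)

// LoadIntoDaemon tags img in the local daemon under tagStr, skipping the
// tarball load entirely when the daemon already has this image ID (the
// short-circuit in Write above).
func LoadIntoDaemon(img v1.Image, tagStr string) (string, error) {
	tag, err := name.NewTag(tagStr)
	if err != nil {
		return "", err
	}
	return daemon.Write(tag, img)
}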
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +k8s:deepcopy-gen=package - -// Package v1 defines structured types for OCI v1 images -package v1 diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md deleted file mode 100644 index 8663a830fd..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# `empty` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/empty?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/empty) - -The empty packages provides an empty base for constructing a `v1.Image` or `v1.ImageIndex`. -This is especially useful when paired with the [`mutate`](/pkg/v1/mutate) package, -see [`mutate.Append`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate#Append) -and [`mutate.AppendManifests`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate#AppendManifests). diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go deleted file mode 100644 index 1a521e9a74..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package empty provides an implementation of v1.Image equivalent to "FROM scratch". -package empty diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go deleted file mode 100644 index c58a06ce02..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package empty - -import ( - "fmt" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Image is a singleton empty image, think: FROM scratch. -var Image, _ = partial.UncompressedToImage(emptyImage{}) - -type emptyImage struct{} - -// MediaType implements partial.UncompressedImageCore. -func (i emptyImage) MediaType() (types.MediaType, error) { - return types.DockerManifestSchema2, nil -} - -// RawConfigFile implements partial.UncompressedImageCore. -func (i emptyImage) RawConfigFile() ([]byte, error) { - return partial.RawConfigFile(i) -} - -// ConfigFile implements v1.Image. -func (i emptyImage) ConfigFile() (*v1.ConfigFile, error) { - return &v1.ConfigFile{ - RootFS: v1.RootFS{ - // Some clients check this. - Type: "layers", - }, - }, nil -} - -func (i emptyImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { - return nil, fmt.Errorf("LayerByDiffID(%s): empty image", h) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go deleted file mode 100644 index 18b414891b..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package empty - -import ( - "encoding/json" - "errors" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Index is a singleton empty index, think: FROM scratch. 
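The empty package pairs naturally with the mutate package from the same module (not shown in this diff): empty.Image is the FROM scratch base for programmatic image builds, and empty.Index, whose declaration follows, plays the same role for indexes. A hedged sketch with an invented layer tarball:

package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
	"github.com/google/go-containerregistry/pkg/v1/tarball"
)

func main() {
	layer, err := tarball.LayerFromFile("app-layer.tar.gz") // invented path
	if err != nil {
		log.Fatal(err)
	}
	// Start from scratch and append a single filesystem layer.
	img, err := mutate.AppendLayers(empty.Image, layer)
	if err != nil {
		log.Fatal(err)
	}
	digest, err := img.Digest()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(digest)
}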
-var Index = emptyIndex{} - -type emptyIndex struct{} - -func (i emptyIndex) MediaType() (types.MediaType, error) { - return types.OCIImageIndex, nil -} - -func (i emptyIndex) Digest() (v1.Hash, error) { - return partial.Digest(i) -} - -func (i emptyIndex) Size() (int64, error) { - return partial.Size(i) -} - -func (i emptyIndex) IndexManifest() (*v1.IndexManifest, error) { - return base(), nil -} - -func (i emptyIndex) RawManifest() ([]byte, error) { - return json.Marshal(base()) -} - -func (i emptyIndex) Image(v1.Hash) (v1.Image, error) { - return nil, errors.New("empty index") -} - -func (i emptyIndex) ImageIndex(v1.Hash) (v1.ImageIndex, error) { - return nil, errors.New("empty index") -} - -func base() *v1.IndexManifest { - return &v1.IndexManifest{ - SchemaVersion: 2, - MediaType: types.OCIImageIndex, - Manifests: []v1.Descriptor{}, - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/google/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/google/README.md deleted file mode 100644 index 7cd8971fe5..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/google/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# `google` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/google?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/google) - -The `google` package provides: -* Some google-specific authentication methods. -* Some [GCR](gcr.io)-specific listing methods. diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/google/auth.go b/vendor/github.com/google/go-containerregistry/pkg/v1/google/auth.go deleted file mode 100644 index 11ae397969..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/google/auth.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package google - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "os/exec" - "time" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/logs" - "golang.org/x/oauth2" - googauth "golang.org/x/oauth2/google" -) - -const cloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - -// GetGcloudCmd is exposed so we can test this. -var GetGcloudCmd = func() *exec.Cmd { - // This is odd, but basically what docker-credential-gcr does. - // - // config-helper is undocumented, but it's purportedly the only supported way - // of accessing tokens (`gcloud auth print-access-token` is discouraged). - // - // --force-auth-refresh means we are getting a token that is valid for about - // an hour (we reuse it until it's expired). - return exec.Command("gcloud", "config", "config-helper", "--force-auth-refresh", "--format=json(credential)") -} - -// NewEnvAuthenticator returns an authn.Authenticator that generates access -// tokens from the environment we're running in. 
-// -// See: https://godoc.org/golang.org/x/oauth2/google#FindDefaultCredentials -func NewEnvAuthenticator() (authn.Authenticator, error) { - ts, err := googauth.DefaultTokenSource(context.Background(), cloudPlatformScope) - if err != nil { - return nil, err - } - - token, err := ts.Token() - if err != nil { - return nil, err - } - - return &tokenSourceAuth{oauth2.ReuseTokenSource(token, ts)}, nil -} - -// NewGcloudAuthenticator returns an oauth2.TokenSource that generates access -// tokens by shelling out to the gcloud sdk. -func NewGcloudAuthenticator() (authn.Authenticator, error) { - if _, err := exec.LookPath("gcloud"); err != nil { - // gcloud is not available, fall back to anonymous - logs.Warn.Println("gcloud binary not found") - return authn.Anonymous, nil - } - - ts := gcloudSource{GetGcloudCmd} - - // Attempt to fetch a token to ensure gcloud is installed and we can run it. - token, err := ts.Token() - if err != nil { - return nil, err - } - - return &tokenSourceAuth{oauth2.ReuseTokenSource(token, ts)}, nil -} - -// NewJSONKeyAuthenticator returns a Basic authenticator which uses Service Account -// as a way of authenticating with Google Container Registry. -// More information: https://cloud.google.com/container-registry/docs/advanced-authentication#json_key_file -func NewJSONKeyAuthenticator(serviceAccountJSON string) authn.Authenticator { - return &authn.Basic{ - Username: "_json_key", - Password: serviceAccountJSON, - } -} - -// NewTokenAuthenticator returns an oauth2.TokenSource that generates access -// tokens by using the Google SDK to produce JWT tokens from a Service Account. -// More information: https://godoc.org/golang.org/x/oauth2/google#JWTAccessTokenSourceFromJSON -func NewTokenAuthenticator(serviceAccountJSON string, scope string) (authn.Authenticator, error) { - ts, err := googauth.JWTAccessTokenSourceFromJSON([]byte(serviceAccountJSON), scope) - if err != nil { - return nil, err - } - - return &tokenSourceAuth{oauth2.ReuseTokenSource(nil, ts)}, nil -} - -// NewTokenSourceAuthenticator converts an oauth2.TokenSource into an authn.Authenticator. -func NewTokenSourceAuthenticator(ts oauth2.TokenSource) authn.Authenticator { - return &tokenSourceAuth{ts} -} - -// tokenSourceAuth turns an oauth2.TokenSource into an authn.Authenticator. -type tokenSourceAuth struct { - oauth2.TokenSource -} - -// Authorization implements authn.Authenticator. -func (tsa *tokenSourceAuth) Authorization() (*authn.AuthConfig, error) { - token, err := tsa.Token() - if err != nil { - return nil, err - } - - return &authn.AuthConfig{ - Username: "_token", - Password: token.AccessToken, - }, nil -} - -// gcloudOutput represents the output of the gcloud command we invoke. -// -// `gcloud config config-helper --format=json(credential)` looks something like: -// -// { -// "credential": { -// "access_token": "supersecretaccesstoken", -// "token_expiry": "2018-12-02T04:08:13Z" -// } -// } -type gcloudOutput struct { - Credential struct { - AccessToken string `json:"access_token"` - TokenExpiry string `json:"token_expiry"` - } `json:"credential"` -} - -type gcloudSource struct { - // This is passed in so that we mock out gcloud and test Token. - exec func() *exec.Cmd -} - -// Token implements oauath2.TokenSource. -func (gs gcloudSource) Token() (*oauth2.Token, error) { - cmd := gs.exec() - var out bytes.Buffer - cmd.Stdout = &out - - // Don't attempt to interpret stderr, just pass it through. 
- cmd.Stderr = logs.Warn.Writer() - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("error executing `gcloud config config-helper`: %w", err) - } - - creds := gcloudOutput{} - if err := json.Unmarshal(out.Bytes(), &creds); err != nil { - return nil, fmt.Errorf("failed to parse `gcloud config config-helper` output: %w", err) - } - - expiry, err := time.Parse(time.RFC3339, creds.Credential.TokenExpiry) - if err != nil { - return nil, fmt.Errorf("failed to parse gcloud token expiry: %w", err) - } - - token := oauth2.Token{ - AccessToken: creds.Credential.AccessToken, - Expiry: expiry, - } - - return &token, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/google/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/google/doc.go deleted file mode 100644 index b6a67df049..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/google/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package google provides facilities for listing images in gcr.io. -package google diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/google/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/v1/google/keychain.go deleted file mode 100644 index 5472b18797..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/google/keychain.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package google - -import ( - "strings" - "sync" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/logs" -) - -// Keychain exports an instance of the google Keychain. -var Keychain authn.Keychain = &googleKeychain{} - -type googleKeychain struct { - once sync.Once - auth authn.Authenticator -} - -// Resolve implements authn.Keychain a la docker-credential-gcr. -// -// This behaves similarly to the GCR credential helper, but reuses tokens until -// they expire. 
-// -// We can't easily add this behavior to our credential helper implementation -// of authn.Authenticator because the credential helper protocol doesn't include -// expiration information, see here: -// https://godoc.org/github.com/docker/docker-credential-helpers/credentials#Credentials -// -// In addition to being a performance optimization, the reuse of these access -// tokens works around a bug in gcloud. It appears that attempting to invoke -// `gcloud config config-helper` multiple times too quickly will fail: -// https://github.com/GoogleCloudPlatform/docker-credential-gcr/issues/54 -// -// We could upstream this behavior into docker-credential-gcr by parsing -// gcloud's output and persisting its tokens across invocations, but then -// we have to deal with invalidating caches across multiple runs (no fun). -// -// In general, we don't worry about that here because we expect to use the same -// gcloud configuration in the scope of this one process. -func (gk *googleKeychain) Resolve(target authn.Resource) (authn.Authenticator, error) { - // Only authenticate GCR and AR so it works with authn.NewMultiKeychain to fallback. - if !isGoogle(target.RegistryStr()) { - return authn.Anonymous, nil - } - - gk.once.Do(func() { - gk.auth = resolve() - }) - - return gk.auth, nil -} - -func resolve() authn.Authenticator { - auth, envErr := NewEnvAuthenticator() - if envErr == nil && auth != authn.Anonymous { - logs.Debug.Println("google.Keychain: using Application Default Credentials") - return auth - } - - auth, gErr := NewGcloudAuthenticator() - if gErr == nil && auth != authn.Anonymous { - logs.Debug.Println("google.Keychain: using gcloud fallback") - return auth - } - - logs.Debug.Println("Failed to get any Google credentials, falling back to Anonymous") - if envErr != nil { - logs.Debug.Printf("Google env error: %v", envErr) - } - if gErr != nil { - logs.Debug.Printf("gcloud error: %v", gErr) - } - return authn.Anonymous -} - -func isGoogle(host string) bool { - return host == "gcr.io" || - strings.HasSuffix(host, ".gcr.io") || - strings.HasSuffix(host, ".pkg.dev") || - strings.HasSuffix(host, ".google.com") -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/google/list.go b/vendor/github.com/google/go-containerregistry/pkg/v1/google/list.go deleted file mode 100644 index 8a5906c235..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/google/list.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
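keychain.go above is the piece most callers touch: google.Keychain only answers for Google registries, so it composes with other keychains. A sketch pairing it with authn.NewMultiKeychain and the remote package from the same module; the repository is invented:

package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/google"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Try Google credentials first, then fall back to the Docker config.
	kc := authn.NewMultiKeychain(google.Keychain, authn.DefaultKeychain)

	ref, err := name.ParseReference("gcr.io/example/app:latest") // invented
	if err != nil {
		log.Fatal(err)
	}
	img, err := remote.Image(ref, remote.WithAuthFromKeychain(kc))
	if err != nil {
		log.Fatal(err)
	}
	d, err := img.Digest()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(d)
}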
- -package google - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/logs" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -// Option is a functional option for List and Walk. -// TODO: Can we somehow reuse the remote options here? -type Option func(*lister) error - -type lister struct { - auth authn.Authenticator - transport http.RoundTripper - repo name.Repository - client *http.Client - ctx context.Context - userAgent string -} - -func newLister(repo name.Repository, options ...Option) (*lister, error) { - l := &lister{ - auth: authn.Anonymous, - transport: http.DefaultTransport, - repo: repo, - ctx: context.Background(), - } - - for _, option := range options { - if err := option(l); err != nil { - return nil, err - } - } - - // transport.Wrapper is a signal that consumers are opt-ing into providing their own transport without any additional wrapping. - // This is to allow consumers full control over the transports logic, such as providing retry logic. - if _, ok := l.transport.(*transport.Wrapper); !ok { - // Wrap the transport in something that logs requests and responses. - // It's expensive to generate the dumps, so skip it if we're writing - // to nothing. - if logs.Enabled(logs.Debug) { - l.transport = transport.NewLogger(l.transport) - } - - // Wrap the transport in something that can retry network flakes. - l.transport = transport.NewRetry(l.transport) - - // Wrap this last to prevent transport.New from double-wrapping. - if l.userAgent != "" { - l.transport = transport.NewUserAgent(l.transport, l.userAgent) - } - } - - scopes := []string{repo.Scope(transport.PullScope)} - tr, err := transport.NewWithContext(l.ctx, repo.Registry, l.auth, l.transport, scopes) - if err != nil { - return nil, err - } - - l.client = &http.Client{Transport: tr} - - return l, nil -} - -func (l *lister) list(repo name.Repository) (*Tags, error) { - uri := &url.URL{ - Scheme: repo.Registry.Scheme(), - Host: repo.Registry.RegistryStr(), - Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()), - RawQuery: "n=10000", - } - - // ECR returns an error if n > 1000: - // https://github.com/google/go-containerregistry/issues/681 - if !isGoogle(repo.RegistryStr()) { - uri.RawQuery = "n=1000" - } - - tags := Tags{} - - // get responses until there is no next page - for { - select { - case <-l.ctx.Done(): - return nil, l.ctx.Err() - default: - } - - req, err := http.NewRequest("GET", uri.String(), nil) - if err != nil { - return nil, err - } - req = req.WithContext(l.ctx) - - resp, err := l.client.Do(req) - if err != nil { - return nil, err - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - parsed := Tags{} - if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { - return nil, err - } - - if err := resp.Body.Close(); err != nil { - return nil, err - } - - if len(parsed.Manifests) != 0 || len(parsed.Children) != 0 { - // We're dealing with GCR, just return directly. - return &parsed, nil - } - - // This isn't GCR, just append the tags and keep paginating. - tags.Tags = append(tags.Tags, parsed.Tags...) 
- - uri, err = getNextPageURL(resp) - if err != nil { - return nil, err - } - // no next page - if uri == nil { - break - } - logs.Warn.Printf("saw non-google tag listing response, falling back to pagination") - } - - return &tags, nil -} - -// getNextPageURL checks if there is a Link header in a http.Response which -// contains a link to the next page. If yes it returns the url.URL of the next -// page otherwise it returns nil. -func getNextPageURL(resp *http.Response) (*url.URL, error) { - link := resp.Header.Get("Link") - if link == "" { - return nil, nil - } - - if link[0] != '<' { - return nil, fmt.Errorf("failed to parse link header: missing '<' in: %s", link) - } - - end := strings.Index(link, ">") - if end == -1 { - return nil, fmt.Errorf("failed to parse link header: missing '>' in: %s", link) - } - link = link[1:end] - - linkURL, err := url.Parse(link) - if err != nil { - return nil, err - } - if resp.Request == nil || resp.Request.URL == nil { - return nil, nil - } - linkURL = resp.Request.URL.ResolveReference(linkURL) - return linkURL, nil -} - -type rawManifestInfo struct { - Size string `json:"imageSizeBytes"` - MediaType string `json:"mediaType"` - Created string `json:"timeCreatedMs"` - Uploaded string `json:"timeUploadedMs"` - Tags []string `json:"tag"` -} - -// ManifestInfo is a Manifests entry is the output of List and Walk. -type ManifestInfo struct { - Size uint64 `json:"imageSizeBytes"` - MediaType string `json:"mediaType"` - Created time.Time `json:"timeCreatedMs"` - Uploaded time.Time `json:"timeUploadedMs"` - Tags []string `json:"tag"` -} - -func fromUnixMs(ms int64) time.Time { - sec := ms / 1000 - ns := (ms % 1000) * 1000000 - return time.Unix(sec, ns) -} - -func toUnixMs(t time.Time) string { - return strconv.FormatInt(t.UnixNano()/1000000, 10) -} - -// MarshalJSON implements json.Marshaler -func (m ManifestInfo) MarshalJSON() ([]byte, error) { - return json.Marshal(rawManifestInfo{ - Size: strconv.FormatUint(m.Size, 10), - MediaType: m.MediaType, - Created: toUnixMs(m.Created), - Uploaded: toUnixMs(m.Uploaded), - Tags: m.Tags, - }) -} - -// UnmarshalJSON implements json.Unmarshaler -func (m *ManifestInfo) UnmarshalJSON(data []byte) error { - raw := rawManifestInfo{} - if err := json.Unmarshal(data, &raw); err != nil { - return err - } - - if raw.Size != "" { - size, err := strconv.ParseUint(raw.Size, 10, 64) - if err != nil { - return err - } - m.Size = size - } - - if raw.Created != "" { - created, err := strconv.ParseInt(raw.Created, 10, 64) - if err != nil { - return err - } - m.Created = fromUnixMs(created) - } - - if raw.Uploaded != "" { - uploaded, err := strconv.ParseInt(raw.Uploaded, 10, 64) - if err != nil { - return err - } - m.Uploaded = fromUnixMs(uploaded) - } - - m.MediaType = raw.MediaType - m.Tags = raw.Tags - - return nil -} - -// Tags is the result of List and Walk. -type Tags struct { - Children []string `json:"child"` - Manifests map[string]ManifestInfo `json:"manifest"` - Name string `json:"name"` - Tags []string `json:"tags"` -} - -// List calls /tags/list for the given repository. -func List(repo name.Repository, options ...Option) (*Tags, error) { - l, err := newLister(repo, options...) - if err != nil { - return nil, err - } - - return l.list(repo) -} - -// WalkFunc is the type of the function called for each repository visited by -// Walk. This implements a similar API to filepath.Walk. 
-// -// The repo argument contains the argument to Walk as a prefix; that is, if Walk -// is called with "gcr.io/foo", which is a repository containing the repository -// "bar", the walk function will be called with argument "gcr.io/foo/bar". -// The tags and error arguments are the result of calling List on repo. -// -// TODO: Do we want a SkipDir error, as in filepath.WalkFunc? -type WalkFunc func(repo name.Repository, tags *Tags, err error) error - -func walk(repo name.Repository, tags *Tags, walkFn WalkFunc, options ...Option) error { - if tags == nil { - // This shouldn't happen. - return fmt.Errorf("tags nil for %q", repo) - } - - if err := walkFn(repo, tags, nil); err != nil { - return err - } - - for _, path := range tags.Children { - child, err := name.NewRepository(fmt.Sprintf("%s/%s", repo, path), name.StrictValidation) - if err != nil { - // We don't expect this ever, so don't pass it through to walkFn. - return fmt.Errorf("unexpected path failure: %w", err) - } - - childTags, err := List(child, options...) - if err != nil { - if err := walkFn(child, nil, err); err != nil { - return err - } - } else { - if err := walk(child, childTags, walkFn, options...); err != nil { - return err - } - } - } - - // We made it! - return nil -} - -// Walk recursively descends repositories, calling walkFn. -func Walk(root name.Repository, walkFn WalkFunc, options ...Option) error { - tags, err := List(root, options...) - if err != nil { - return walkFn(root, nil, err) - } - - return walk(root, tags, walkFn, options...) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/google/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/google/options.go deleted file mode 100644 index 604808cea6..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/google/options.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
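With list.go fully removed, a quick sketch of its Walk entry point, using WithAuthFromKeychain from the options.go whose header begins above; the root repository is invented:

package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/google"
)

func main() {
	root, err := name.NewRepository("gcr.io/example") // invented repo
	if err != nil {
		log.Fatal(err)
	}
	walkFn := func(repo name.Repository, tags *google.Tags, err error) error {
		if err != nil {
			return err // listing this repo failed; abort the walk
		}
		fmt.Printf("%s: %d tags, %d children\n",
			repo, len(tags.Tags), len(tags.Children))
		return nil
	}
	if err := google.Walk(root, walkFn,
		google.WithAuthFromKeychain(google.Keychain)); err != nil {
		log.Fatal(err)
	}
}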
- -package google - -import ( - "context" - "net/http" - - "github.com/google/go-containerregistry/pkg/authn" -) - -// WithTransport is a functional option for overriding the default transport -// on a remote image -func WithTransport(t http.RoundTripper) Option { - return func(l *lister) error { - l.transport = t - return nil - } -} - -// WithAuth is a functional option for overriding the default authenticator -// on a remote image -func WithAuth(auth authn.Authenticator) Option { - return func(l *lister) error { - l.auth = auth - return nil - } -} - -// WithAuthFromKeychain is a functional option for overriding the default -// authenticator on a remote image using an authn.Keychain -func WithAuthFromKeychain(keys authn.Keychain) Option { - return func(l *lister) error { - auth, err := keys.Resolve(l.repo.Registry) - if err != nil { - return err - } - l.auth = auth - return nil - } -} - -// WithContext is a functional option for overriding the default -// context.Context for HTTP request to list remote images -func WithContext(ctx context.Context) Option { - return func(l *lister) error { - l.ctx = ctx - return nil - } -} - -// WithUserAgent adds the given string to the User-Agent header for any HTTP -// requests. This header will also include "go-containerregistry/${version}". -// -// If you want to completely overwrite the User-Agent header, use WithTransport. -func WithUserAgent(ua string) Option { - return func(l *lister) error { - l.userAgent = ua - return nil - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go deleted file mode 100644 index f78a5fa89e..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "crypto" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "io" - "strconv" - "strings" -) - -// Hash is an unqualified digest of some content, e.g. sha256:deadbeef -type Hash struct { - // Algorithm holds the algorithm used to compute the hash. - Algorithm string - - // Hex holds the hex portion of the content hash. - Hex string -} - -// String reverses NewHash returning the string-form of the hash. -func (h Hash) String() string { - return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex) -} - -// NewHash validates the input string is a hash and returns a strongly type Hash object. 
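A short sketch of the two hash helpers in this file: NewHash, whose implementation follows, parses an algo:hex string, and SHA256, further below, digests a reader and reports the bytes consumed. The digest literal is the well-known SHA-256 of the empty string:

package main

import (
	"fmt"
	"log"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	h, err := v1.NewHash(
		"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h.Algorithm, len(h.Hex)) // sha256 64

	// SHA256 digests a reader and also reports how many bytes it consumed.
	sum, n, err := v1.SHA256(strings.NewReader("hello"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sum, n)
}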
-func NewHash(s string) (Hash, error) { - h := Hash{} - if err := h.parse(s); err != nil { - return Hash{}, err - } - return h, nil -} - -// MarshalJSON implements json.Marshaler -func (h Hash) MarshalJSON() ([]byte, error) { - return json.Marshal(h.String()) -} - -// UnmarshalJSON implements json.Unmarshaler -func (h *Hash) UnmarshalJSON(data []byte) error { - s, err := strconv.Unquote(string(data)) - if err != nil { - return err - } - return h.parse(s) -} - -// MarshalText implements encoding.TextMarshaler. This is required to use -// v1.Hash as a key in a map when marshalling JSON. -func (h Hash) MarshalText() (text []byte, err error) { - return []byte(h.String()), nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. This is required to use -// v1.Hash as a key in a map when unmarshalling JSON. -func (h *Hash) UnmarshalText(text []byte) error { - return h.parse(string(text)) -} - -// Hasher returns a hash.Hash for the named algorithm (e.g. "sha256") -func Hasher(name string) (hash.Hash, error) { - switch name { - case "sha256": - return crypto.SHA256.New(), nil - default: - return nil, fmt.Errorf("unsupported hash: %q", name) - } -} - -func (h *Hash) parse(unquoted string) error { - parts := strings.Split(unquoted, ":") - if len(parts) != 2 { - return fmt.Errorf("cannot parse hash: %q", unquoted) - } - - rest := strings.TrimLeft(parts[1], "0123456789abcdef") - if len(rest) != 0 { - return fmt.Errorf("found non-hex character in hash: %c", rest[0]) - } - - hasher, err := Hasher(parts[0]) - if err != nil { - return err - } - // Compare the hex to the expected size (2 hex characters per byte) - if len(parts[1]) != hasher.Size()*2 { - return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1]) - } - - h.Algorithm = parts[0] - h.Hex = parts[1] - return nil -} - -// SHA256 computes the Hash of the provided io.Reader's content. -func SHA256(r io.Reader) (Hash, int64, error) { - hasher := crypto.SHA256.New() - n, err := io.Copy(hasher, r) - if err != nil { - return Hash{}, 0, err - } - return Hash{ - Algorithm: "sha256", - Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), - }, n, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go deleted file mode 100644 index 8de9e47645..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Image defines the interface for interacting with an OCI v1 image. -type Image interface { - // Layers returns the ordered collection of filesystem layers that comprise this image. - // The order of the list is oldest/base layer first, and most-recent/top layer last. - Layers() ([]Layer, error) - - // MediaType of this image's manifest. 
- MediaType() (types.MediaType, error) - - // Size returns the size of the manifest. - Size() (int64, error) - - // ConfigName returns the hash of the image's config file, also known as - // the Image ID. - ConfigName() (Hash, error) - - // ConfigFile returns this image's config file. - ConfigFile() (*ConfigFile, error) - - // RawConfigFile returns the serialized bytes of ConfigFile(). - RawConfigFile() ([]byte, error) - - // Digest returns the sha256 of this image's manifest. - Digest() (Hash, error) - - // Manifest returns this image's Manifest object. - Manifest() (*Manifest, error) - - // RawManifest returns the serialized bytes of Manifest() - RawManifest() ([]byte, error) - - // LayerByDigest returns a Layer for interacting with a particular layer of - // the image, looking it up by "digest" (the compressed hash). - LayerByDigest(Hash) (Layer, error) - - // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" - // (the uncompressed hash). - LayerByDiffID(Hash) (Layer, error) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go deleted file mode 100644 index 8e7bc8ebb3..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// ImageIndex defines the interface for interacting with an OCI image index. -type ImageIndex interface { - // MediaType of this image's manifest. - MediaType() (types.MediaType, error) - - // Digest returns the sha256 of this index's manifest. - Digest() (Hash, error) - - // Size returns the size of the manifest. - Size() (int64, error) - - // IndexManifest returns this image index's manifest object. - IndexManifest() (*IndexManifest, error) - - // RawManifest returns the serialized bytes of IndexManifest(). - RawManifest() ([]byte, error) - - // Image returns a v1.Image that this ImageIndex references. - Image(Hash) (Image, error) - - // ImageIndex returns a v1.ImageIndex that this ImageIndex references. - ImageIndex(Hash) (ImageIndex, error) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go deleted file mode 100644 index 57447d263d..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "io" - - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Layer is an interface for accessing the properties of a particular layer of a v1.Image -type Layer interface { - // Digest returns the Hash of the compressed layer. - Digest() (Hash, error) - - // DiffID returns the Hash of the uncompressed layer. - DiffID() (Hash, error) - - // Compressed returns an io.ReadCloser for the compressed layer contents. - Compressed() (io.ReadCloser, error) - - // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. - Uncompressed() (io.ReadCloser, error) - - // Size returns the compressed size of the Layer. - Size() (int64, error) - - // MediaType returns the media type of the Layer. - MediaType() (types.MediaType, error) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/README.md deleted file mode 100644 index 54bee6d9fd..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# `layout` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/layout?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/layout) - -The `layout` package implements support for interacting with an [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/master/image-layout.md). diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/blob.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/blob.go deleted file mode 100644 index 2e5f4358dd..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/blob.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package layout - -import ( - "io" - "os" - - v1 "github.com/google/go-containerregistry/pkg/v1" -) - -// Blob returns a blob with the given hash from the Path. -func (l Path) Blob(h v1.Hash) (io.ReadCloser, error) { - return os.Open(l.blobPath(h)) -} - -// Bytes is a convenience function to return a blob from the Path as -// a byte slice. 
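`Blob` and `Bytes` are the low-level readers the rest of the layout package builds on. A small sketch of reading the first manifest out of an on-disk layout; `./my-layout` is a placeholder path and the layout is assumed to contain at least one manifest:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1/layout"
)

func main() {
	// FromPath fails unless <path>/index.json exists.
	p, err := layout.FromPath("./my-layout") // placeholder path
	if err != nil {
		panic(err)
	}

	ii, err := p.ImageIndex()
	if err != nil {
		panic(err)
	}
	im, err := ii.IndexManifest()
	if err != nil {
		panic(err)
	}

	// Blobs live at blobs/<algorithm>/<hex> under the layout root;
	// Bytes is just os.ReadFile on that path.
	raw, err := p.Bytes(im.Manifests[0].Digest) // assumes a non-empty index
	if err != nil {
		panic(err)
	}
	fmt.Printf("first manifest: %d bytes\n", len(raw))
}
```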
-func (l Path) Bytes(h v1.Hash) ([]byte, error) { - return os.ReadFile(l.blobPath(h)) -} - -func (l Path) blobPath(h v1.Hash) string { - return l.path("blobs", h.Algorithm, h.Hex) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/doc.go deleted file mode 100644 index d80d273639..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package layout provides facilities for reading/writing artifacts from/to -// an OCI image layout on disk, see: -// -// https://github.com/opencontainers/image-spec/blob/master/image-layout.md -package layout diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/gc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/gc.go deleted file mode 100644 index 5fdb2c05d2..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/gc.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This is an EXPERIMENTAL package, and may change in arbitrary ways without notice. -package layout - -import ( - "fmt" - "io/fs" - "path/filepath" - "strings" - - v1 "github.com/google/go-containerregistry/pkg/v1" -) - -// GarbageCollect removes unreferenced blobs from the oci-layout -// -// This is an experimental api, and not subject to any stability guarantees -// We may abandon it at any time, without prior notice. -// Deprecated: Use it at your own risk! 
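Note that, despite the comment above, the implementation that follows only *returns* the unreferenced digests; it does not delete anything itself, so callers pair it with `RemoveBlob`. A hedged sketch of that pairing (experimental API, placeholder path):

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1/layout"
)

func main() {
	p, err := layout.FromPath("./my-layout") // placeholder path
	if err != nil {
		panic(err)
	}

	// GarbageCollect finds blobs that nothing reachable from index.json
	// references; deletion is an explicit second step.
	unreferenced, err := p.GarbageCollect()
	if err != nil {
		panic(err)
	}
	for _, h := range unreferenced {
		fmt.Println("removing", h)
		if err := p.RemoveBlob(h); err != nil {
			panic(err)
		}
	}
}
```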
-func (l Path) GarbageCollect() ([]v1.Hash, error) { - idx, err := l.ImageIndex() - if err != nil { - return nil, err - } - blobsToKeep := map[string]bool{} - if err := l.garbageCollectImageIndex(idx, blobsToKeep); err != nil { - return nil, err - } - blobsDir := l.path("blobs") - removedBlobs := []v1.Hash{} - - err = filepath.WalkDir(blobsDir, func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err - } - - if d.IsDir() { - return nil - } - - rel, err := filepath.Rel(blobsDir, path) - if err != nil { - return err - } - hashString := strings.Replace(rel, "/", ":", 1) - if present := blobsToKeep[hashString]; !present { - h, err := v1.NewHash(hashString) - if err != nil { - return err - } - removedBlobs = append(removedBlobs, h) - } - return nil - }) - - if err != nil { - return nil, err - } - - return removedBlobs, nil -} - -func (l Path) garbageCollectImageIndex(index v1.ImageIndex, blobsToKeep map[string]bool) error { - idxm, err := index.IndexManifest() - if err != nil { - return err - } - - h, err := index.Digest() - if err != nil { - return err - } - - blobsToKeep[h.String()] = true - - for _, descriptor := range idxm.Manifests { - if descriptor.MediaType.IsImage() { - img, err := index.Image(descriptor.Digest) - if err != nil { - return err - } - if err := l.garbageCollectImage(img, blobsToKeep); err != nil { - return err - } - } else if descriptor.MediaType.IsIndex() { - idx, err := index.ImageIndex(descriptor.Digest) - if err != nil { - return err - } - if err := l.garbageCollectImageIndex(idx, blobsToKeep); err != nil { - return err - } - } else { - return fmt.Errorf("gc: unknown media type: %s", descriptor.MediaType) - } - } - return nil -} - -func (l Path) garbageCollectImage(image v1.Image, blobsToKeep map[string]bool) error { - h, err := image.Digest() - if err != nil { - return err - } - blobsToKeep[h.String()] = true - - h, err = image.ConfigName() - if err != nil { - return err - } - blobsToKeep[h.String()] = true - - ls, err := image.Layers() - if err != nil { - return err - } - for _, l := range ls { - h, err := l.Digest() - if err != nil { - return err - } - blobsToKeep[h.String()] = true - } - return nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/image.go deleted file mode 100644 index c9ae9665c3..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/image.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package layout - -import ( - "fmt" - "io" - "os" - "sync" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -type layoutImage struct { - path Path - desc v1.Descriptor - manifestLock sync.Mutex // Protects rawManifest - rawManifest []byte -} - -var _ partial.CompressedImageCore = (*layoutImage)(nil) - -// Image reads a v1.Image with digest h from the Path. -func (l Path) Image(h v1.Hash) (v1.Image, error) { - ii, err := l.ImageIndex() - if err != nil { - return nil, err - } - - return ii.Image(h) -} - -func (li *layoutImage) MediaType() (types.MediaType, error) { - return li.desc.MediaType, nil -} - -// Implements WithManifest for partial.Blobset. -func (li *layoutImage) Manifest() (*v1.Manifest, error) { - return partial.Manifest(li) -} - -func (li *layoutImage) RawManifest() ([]byte, error) { - li.manifestLock.Lock() - defer li.manifestLock.Unlock() - if li.rawManifest != nil { - return li.rawManifest, nil - } - - b, err := li.path.Bytes(li.desc.Digest) - if err != nil { - return nil, err - } - - li.rawManifest = b - return li.rawManifest, nil -} - -func (li *layoutImage) RawConfigFile() ([]byte, error) { - manifest, err := li.Manifest() - if err != nil { - return nil, err - } - - return li.path.Bytes(manifest.Config.Digest) -} - -func (li *layoutImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { - manifest, err := li.Manifest() - if err != nil { - return nil, err - } - - if h == manifest.Config.Digest { - return &compressedBlob{ - path: li.path, - desc: manifest.Config, - }, nil - } - - for _, desc := range manifest.Layers { - if h == desc.Digest { - return &compressedBlob{ - path: li.path, - desc: desc, - }, nil - } - } - - return nil, fmt.Errorf("could not find layer in image: %s", h) -} - -type compressedBlob struct { - path Path - desc v1.Descriptor -} - -func (b *compressedBlob) Digest() (v1.Hash, error) { - return b.desc.Digest, nil -} - -func (b *compressedBlob) Compressed() (io.ReadCloser, error) { - return b.path.Blob(b.desc.Digest) -} - -func (b *compressedBlob) Size() (int64, error) { - return b.desc.Size, nil -} - -func (b *compressedBlob) MediaType() (types.MediaType, error) { - return b.desc.MediaType, nil -} - -// Descriptor implements partial.withDescriptor. -func (b *compressedBlob) Descriptor() (*v1.Descriptor, error) { - return &b.desc, nil -} - -// See partial.Exists. -func (b *compressedBlob) Exists() (bool, error) { - _, err := os.Stat(b.path.blobPath(b.desc.Digest)) - if os.IsNotExist(err) { - return false, nil - } - return err == nil, err -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/index.go deleted file mode 100644 index 7404f186b9..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/index.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package layout - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "os" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -var _ v1.ImageIndex = (*layoutIndex)(nil) - -type layoutIndex struct { - mediaType types.MediaType - path Path - rawIndex []byte -} - -// ImageIndexFromPath is a convenience function which constructs a Path and returns its v1.ImageIndex. -func ImageIndexFromPath(path string) (v1.ImageIndex, error) { - lp, err := FromPath(path) - if err != nil { - return nil, err - } - return lp.ImageIndex() -} - -// ImageIndex returns a v1.ImageIndex for the Path. -func (l Path) ImageIndex() (v1.ImageIndex, error) { - rawIndex, err := os.ReadFile(l.path("index.json")) - if err != nil { - return nil, err - } - - idx := &layoutIndex{ - mediaType: types.OCIImageIndex, - path: l, - rawIndex: rawIndex, - } - - return idx, nil -} - -func (i *layoutIndex) MediaType() (types.MediaType, error) { - return i.mediaType, nil -} - -func (i *layoutIndex) Digest() (v1.Hash, error) { - return partial.Digest(i) -} - -func (i *layoutIndex) Size() (int64, error) { - return partial.Size(i) -} - -func (i *layoutIndex) IndexManifest() (*v1.IndexManifest, error) { - var index v1.IndexManifest - err := json.Unmarshal(i.rawIndex, &index) - return &index, err -} - -func (i *layoutIndex) RawManifest() ([]byte, error) { - return i.rawIndex, nil -} - -func (i *layoutIndex) Image(h v1.Hash) (v1.Image, error) { - // Look up the digest in our manifest first to return a better error. - desc, err := i.findDescriptor(h) - if err != nil { - return nil, err - } - - if !isExpectedMediaType(desc.MediaType, types.OCIManifestSchema1, types.DockerManifestSchema2) { - return nil, fmt.Errorf("unexpected media type for %v: %s", h, desc.MediaType) - } - - img := &layoutImage{ - path: i.path, - desc: *desc, - } - return partial.CompressedToImage(img) -} - -func (i *layoutIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { - // Look up the digest in our manifest first to return a better error. - desc, err := i.findDescriptor(h) - if err != nil { - return nil, err - } - - if !isExpectedMediaType(desc.MediaType, types.OCIImageIndex, types.DockerManifestList) { - return nil, fmt.Errorf("unexpected media type for %v: %s", h, desc.MediaType) - } - - rawIndex, err := i.path.Bytes(h) - if err != nil { - return nil, err - } - - return &layoutIndex{ - mediaType: desc.MediaType, - path: i.path, - rawIndex: rawIndex, - }, nil -} - -func (i *layoutIndex) Blob(h v1.Hash) (io.ReadCloser, error) { - return i.path.Blob(h) -} - -func (i *layoutIndex) findDescriptor(h v1.Hash) (*v1.Descriptor, error) { - im, err := i.IndexManifest() - if err != nil { - return nil, err - } - - if h == (v1.Hash{}) { - if len(im.Manifests) != 1 { - return nil, errors.New("oci layout must contain only a single image to be used with layout.Image") - } - return &(im.Manifests)[0], nil - } - - for _, desc := range im.Manifests { - if desc.Digest == h { - return &desc, nil - } - } - - return nil, fmt.Errorf("could not find descriptor in index: %s", h) -} - -// TODO: Pull this out into methods on types.MediaType? e.g. 
instead, have: -// * mt.IsIndex() -// * mt.IsImage() -func isExpectedMediaType(mt types.MediaType, expected ...types.MediaType) bool { - for _, allowed := range expected { - if mt == allowed { - return true - } - } - return false -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go deleted file mode 100644 index a031ff5ae9..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019 The original author or authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package layout - -import "path/filepath" - -// Path represents an OCI image layout rooted in a file system path -type Path string - -func (l Path) path(elem ...string) string { - complete := []string{string(l)} - return filepath.Join(append(complete, elem...)...) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/options.go deleted file mode 100644 index a26f9f3710..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/options.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package layout - -import v1 "github.com/google/go-containerregistry/pkg/v1" - -// Option is a functional option for Layout. -type Option func(*options) - -type options struct { - descOpts []descriptorOption -} - -func makeOptions(opts ...Option) *options { - o := &options{ - descOpts: []descriptorOption{}, - } - for _, apply := range opts { - apply(o) - } - return o -} - -type descriptorOption func(*v1.Descriptor) - -// WithAnnotations adds annotations to the artifact descriptor. -func WithAnnotations(annotations map[string]string) Option { - return func(o *options) { - o.descOpts = append(o.descOpts, func(desc *v1.Descriptor) { - if desc.Annotations == nil { - desc.Annotations = make(map[string]string) - } - for k, v := range annotations { - desc.Annotations[k] = v - } - }) - } -} - -// WithURLs adds urls to the artifact descriptor. -func WithURLs(urls []string) Option { - return func(o *options) { - o.descOpts = append(o.descOpts, func(desc *v1.Descriptor) { - if desc.URLs == nil { - desc.URLs = []string{} - } - desc.URLs = append(desc.URLs, urls...) 
- }) - } -} - -// WithPlatform sets the platform of the artifact descriptor. -func WithPlatform(platform v1.Platform) Option { - return func(o *options) { - o.descOpts = append(o.descOpts, func(desc *v1.Descriptor) { - desc.Platform = &platform - }) - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/read.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/read.go deleted file mode 100644 index 796abc7dd0..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/read.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019 The original author or authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package layout - -import ( - "os" - "path/filepath" -) - -// FromPath reads an OCI image layout at path and constructs a layout.Path. -func FromPath(path string) (Path, error) { - // TODO: check oci-layout exists - - _, err := os.Stat(filepath.Join(path, "index.json")) - if err != nil { - return "", err - } - - return Path(path), nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go deleted file mode 100644 index d6e35c3914..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go +++ /dev/null @@ -1,482 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package layout - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "path/filepath" - - "github.com/google/go-containerregistry/pkg/logs" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/match" - "github.com/google/go-containerregistry/pkg/v1/mutate" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/stream" - "github.com/google/go-containerregistry/pkg/v1/types" - "golang.org/x/sync/errgroup" -) - -var layoutFile = `{ - "imageLayoutVersion": "1.0.0" -}` - -// AppendImage writes a v1.Image to the Path and updates -// the index.json to reference it. -func (l Path) AppendImage(img v1.Image, options ...Option) error { - if err := l.WriteImage(img); err != nil { - return err - } - - desc, err := partial.Descriptor(img) - if err != nil { - return err - } - - o := makeOptions(options...) 
- for _, opt := range o.descOpts { - opt(desc) - } - - return l.AppendDescriptor(*desc) -} - -// AppendIndex writes a v1.ImageIndex to the Path and updates -// the index.json to reference it. -func (l Path) AppendIndex(ii v1.ImageIndex, options ...Option) error { - if err := l.WriteIndex(ii); err != nil { - return err - } - - desc, err := partial.Descriptor(ii) - if err != nil { - return err - } - - o := makeOptions(options...) - for _, opt := range o.descOpts { - opt(desc) - } - - return l.AppendDescriptor(*desc) -} - -// AppendDescriptor adds a descriptor to the index.json of the Path. -func (l Path) AppendDescriptor(desc v1.Descriptor) error { - ii, err := l.ImageIndex() - if err != nil { - return err - } - - index, err := ii.IndexManifest() - if err != nil { - return err - } - - index.Manifests = append(index.Manifests, desc) - - rawIndex, err := json.MarshalIndent(index, "", " ") - if err != nil { - return err - } - - return l.WriteFile("index.json", rawIndex, os.ModePerm) -} - -// ReplaceImage writes a v1.Image to the Path and updates -// the index.json to reference it, replacing any existing one that matches matcher, if found. -func (l Path) ReplaceImage(img v1.Image, matcher match.Matcher, options ...Option) error { - if err := l.WriteImage(img); err != nil { - return err - } - - return l.replaceDescriptor(img, matcher, options...) -} - -// ReplaceIndex writes a v1.ImageIndex to the Path and updates -// the index.json to reference it, replacing any existing one that matches matcher, if found. -func (l Path) ReplaceIndex(ii v1.ImageIndex, matcher match.Matcher, options ...Option) error { - if err := l.WriteIndex(ii); err != nil { - return err - } - - return l.replaceDescriptor(ii, matcher, options...) -} - -// replaceDescriptor adds a descriptor to the index.json of the Path, replacing -// any one matching matcher, if found. -func (l Path) replaceDescriptor(append mutate.Appendable, matcher match.Matcher, options ...Option) error { - ii, err := l.ImageIndex() - if err != nil { - return err - } - - desc, err := partial.Descriptor(append) - if err != nil { - return err - } - - o := makeOptions(options...) - for _, opt := range o.descOpts { - opt(desc) - } - - add := mutate.IndexAddendum{ - Add: append, - Descriptor: *desc, - } - ii = mutate.AppendManifests(mutate.RemoveManifests(ii, matcher), add) - - index, err := ii.IndexManifest() - if err != nil { - return err - } - - rawIndex, err := json.MarshalIndent(index, "", " ") - if err != nil { - return err - } - - return l.WriteFile("index.json", rawIndex, os.ModePerm) -} - -// RemoveDescriptors removes any descriptors that match the match.Matcher from the index.json of the Path. -func (l Path) RemoveDescriptors(matcher match.Matcher) error { - ii, err := l.ImageIndex() - if err != nil { - return err - } - ii = mutate.RemoveManifests(ii, matcher) - - index, err := ii.IndexManifest() - if err != nil { - return err - } - - rawIndex, err := json.MarshalIndent(index, "", " ") - if err != nil { - return err - } - - return l.WriteFile("index.json", rawIndex, os.ModePerm) -} - -// WriteFile writes a file with arbitrary data at an arbitrary location in a v1 -// layout. Used mostly internally to write files like "oci-layout" and -// "index.json", but it can also be used to write other arbitrary files. Do *not* use -// this to write blobs. Use only WriteBlob() for that.
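Taken together, the `Append*`/`Replace*`/`Remove*` functions above are the index.json bookkeeping layer over the blob writers that follow. A sketch of the common path, creating a fresh layout and appending one image; `random.Image` stands in for a real image and the path is a placeholder:

```go
package main

import (
	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/layout"
	"github.com/google/go-containerregistry/pkg/v1/random"
)

func main() {
	// Write an empty index to disk, creating oci-layout and index.json.
	p, err := layout.Write("./my-layout", empty.Index) // placeholder path
	if err != nil {
		panic(err)
	}

	// A throwaway two-layer image to append.
	img, err := random.Image(1024, 2)
	if err != nil {
		panic(err)
	}

	// AppendImage writes the blobs (via WriteImage) and then records a
	// descriptor in index.json; options decorate that descriptor.
	err = p.AppendImage(img, layout.WithAnnotations(map[string]string{
		"org.opencontainers.image.ref.name": "latest",
	}))
	if err != nil {
		panic(err)
	}
}
```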
-func (l Path) WriteFile(name string, data []byte, perm os.FileMode) error { - if err := os.MkdirAll(l.path(), os.ModePerm); err != nil && !os.IsExist(err) { - return err - } - - return os.WriteFile(l.path(name), data, perm) -} - -// WriteBlob copies a file to the blobs/ directory in the Path from the given ReadCloser at -// blobs/{hash.Algorithm}/{hash.Hex}. -func (l Path) WriteBlob(hash v1.Hash, r io.ReadCloser) error { - return l.writeBlob(hash, -1, r, nil) -} - -func (l Path) writeBlob(hash v1.Hash, size int64, rc io.ReadCloser, renamer func() (v1.Hash, error)) error { - defer rc.Close() - if hash.Hex == "" && renamer == nil { - panic("writeBlob called an invalid hash and no renamer") - } - - dir := l.path("blobs", hash.Algorithm) - if err := os.MkdirAll(dir, os.ModePerm); err != nil && !os.IsExist(err) { - return err - } - - // Check if blob already exists and is the correct size - file := filepath.Join(dir, hash.Hex) - if s, err := os.Stat(file); err == nil && !s.IsDir() && (s.Size() == size || size == -1) { - return nil - } - - // If a renamer func was provided write to a temporary file - open := func() (*os.File, error) { return os.Create(file) } - if renamer != nil { - open = func() (*os.File, error) { return os.CreateTemp(dir, hash.Hex) } - } - w, err := open() - if err != nil { - return err - } - if renamer != nil { - // Delete temp file if an error is encountered before renaming - defer func() { - if err := os.Remove(w.Name()); err != nil && !errors.Is(err, os.ErrNotExist) { - logs.Warn.Printf("error removing temporary file after encountering an error while writing blob: %v", err) - } - }() - } - defer w.Close() - - // Write to file and exit if not renaming - if n, err := io.Copy(w, rc); err != nil || renamer == nil { - return err - } else if size != -1 && n != size { - return fmt.Errorf("expected blob size %d, but only wrote %d", size, n) - } - - // Always close reader before renaming, since Close computes the digest in - // the case of streaming layers. If Close is not called explicitly, it will - // occur in a goroutine that is not guaranteed to succeed before renamer is - // called. When renamer is the layer's Digest method, it can return - // ErrNotComputed. - if err := rc.Close(); err != nil { - return err - } - - // Always close file before renaming - if err := w.Close(); err != nil { - return err - } - - // Rename file based on the final hash - finalHash, err := renamer() - if err != nil { - return fmt.Errorf("error getting final digest of layer: %w", err) - } - - renamePath := l.path("blobs", finalHash.Algorithm, finalHash.Hex) - return os.Rename(w.Name(), renamePath) -} - -// writeLayer writes the compressed layer to a blob. Unlike WriteBlob it will -// write to a temporary file (suffixed with .tmp) within the layout until the -// compressed reader is fully consumed and written to disk. Also unlike -// WriteBlob, it will not skip writing and exit without error when a blob file -// exists, but does not have the correct size. (The blob hash is not -// considered, because it may be expensive to compute.) -func (l Path) writeLayer(layer v1.Layer) error { - d, err := layer.Digest() - if errors.Is(err, stream.ErrNotComputed) { - // Allow digest errors, since streams may not have calculated the hash - // yet. Instead, use an empty value, which will be transformed into a - // random file name with `os.CreateTemp` and the final digest will be - // calculated after writing to a temp file and before renaming to the - // final path. 
- d = v1.Hash{Algorithm: "sha256", Hex: ""} - } else if err != nil { - return err - } - - s, err := layer.Size() - if errors.Is(err, stream.ErrNotComputed) { - // Allow size errors, since streams may not have calculated the size - // yet. Instead, use -1 as a sentinel value meaning that no size - // comparison can be done and any sized blob file should be considered - // valid and not overwritten. - // - // TODO: Provide an option to always overwrite blobs. - s = -1 - } else if err != nil { - return err - } - - r, err := layer.Compressed() - if err != nil { - return err - } - - if err := l.writeBlob(d, s, r, layer.Digest); err != nil { - return fmt.Errorf("error writing layer: %w", err) - } - return nil -} - -// RemoveBlob removes a file from the blobs directory in the Path -// at blobs/{hash.Algorithm}/{hash.Hex} -// It does *not* remove any reference to it from other manifests or indexes, or -// from the root index.json. -func (l Path) RemoveBlob(hash v1.Hash) error { - dir := l.path("blobs", hash.Algorithm) - err := os.Remove(filepath.Join(dir, hash.Hex)) - if err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// WriteImage writes an image, including its manifest, config and all of its -// layers, to the blobs directory. If any blob already exists, as determined by -// the hash filename, it is not written again. -// This function does *not* update the `index.json` file. If you want to write the -// image and also update the `index.json`, call AppendImage(), which wraps this -// and also updates the `index.json`. -func (l Path) WriteImage(img v1.Image) error { - layers, err := img.Layers() - if err != nil { - return err - } - - // Write the layers concurrently. - var g errgroup.Group - for _, layer := range layers { - layer := layer - g.Go(func() error { - return l.writeLayer(layer) - }) - } - if err := g.Wait(); err != nil { - return err - } - - // Write the config. - cfgName, err := img.ConfigName() - if err != nil { - return err - } - cfgBlob, err := img.RawConfigFile() - if err != nil { - return err - } - if err := l.WriteBlob(cfgName, io.NopCloser(bytes.NewReader(cfgBlob))); err != nil { - return err - } - - // Write the img manifest. - d, err := img.Digest() - if err != nil { - return err - } - manifest, err := img.RawManifest() - if err != nil { - return err - } - - return l.WriteBlob(d, io.NopCloser(bytes.NewReader(manifest))) -} - -type withLayer interface { - Layer(v1.Hash) (v1.Layer, error) -} - -type withBlob interface { - Blob(v1.Hash) (io.ReadCloser, error) -} - -func (l Path) writeIndexToFile(indexFile string, ii v1.ImageIndex) error { - index, err := ii.IndexManifest() - if err != nil { - return err - } - - // Walk the descriptors and write any v1.Image or v1.ImageIndex that we find. - // If we come across something we don't expect, just write it as a blob. - for _, desc := range index.Manifests { - switch desc.MediaType { - case types.OCIImageIndex, types.DockerManifestList: - ii, err := ii.ImageIndex(desc.Digest) - if err != nil { - return err - } - if err := l.WriteIndex(ii); err != nil { - return err - } - case types.OCIManifestSchema1, types.DockerManifestSchema2: - img, err := ii.Image(desc.Digest) - if err != nil { - return err - } - if err := l.WriteImage(img); err != nil { - return err - } - default: - // TODO: The layout could reference arbitrary things, which we should - // probably just pass through. - - var blob io.ReadCloser - // Workaround for #819.
- if wl, ok := ii.(withLayer); ok { - layer, lerr := wl.Layer(desc.Digest) - if lerr != nil { - return lerr - } - blob, err = layer.Compressed() - } else if wb, ok := ii.(withBlob); ok { - blob, err = wb.Blob(desc.Digest) - } - if err != nil { - return err - } - if err := l.WriteBlob(desc.Digest, blob); err != nil { - return err - } - } - } - - rawIndex, err := ii.RawManifest() - if err != nil { - return err - } - - return l.WriteFile(indexFile, rawIndex, os.ModePerm) -} - -// WriteIndex writes an index to the blobs directory. It walks down the children, -// including child manifests and/or indexes, until every config and layer -// has been written. If any blob already exists, as determined by -// the hash filename, it is not written again. -// This function does *not* update the `index.json` file. If you want to write the -// index and also update the `index.json`, call AppendIndex(), which wraps this -// and also updates the `index.json`. -func (l Path) WriteIndex(ii v1.ImageIndex) error { - // Always just write oci-layout file, since it's small. - if err := l.WriteFile("oci-layout", []byte(layoutFile), os.ModePerm); err != nil { - return err - } - - h, err := ii.Digest() - if err != nil { - return err - } - - indexFile := filepath.Join("blobs", h.Algorithm, h.Hex) - return l.writeIndexToFile(indexFile, ii) -} - -// Write constructs a Path at path from an ImageIndex. -// -// The contents are written in the following format: -// At the top level, there is: -// -// One oci-layout file containing the version of this image-layout. -// One index.json file listing descriptors for the contained images. -// -// Under blobs/, there is, for each image: -// -// One file for each layer, named after the layer's SHA. -// One file for each config blob, named after its SHA. -// One file for each manifest blob, named after its SHA. -func Write(path string, ii v1.ImageIndex) (Path, error) { - lp := Path(path) - // Always just write oci-layout file, since it's small. - if err := lp.WriteFile("oci-layout", []byte(layoutFile), os.ModePerm); err != nil { - return "", err - } - - // TODO create blobs/ in case there is a blobs file which would prevent the directory from being created - - return lp, lp.writeIndexToFile("index.json", ii) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go deleted file mode 100644 index 22d483f3bd..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "encoding/json" - "io" - - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Manifest represents the OCI image manifest in a structured way.
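A sketch of `ParseManifest` on a hand-rolled manifest; the digest is a dummy 64-hex value because, per `hash.go` above, malformed digests fail at decode time:

```go
package main

import (
	"fmt"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	// A dummy but well-formed digest: 64 hex digits for sha256.
	digest := "sha256:" + strings.Repeat("a", 64)

	raw := fmt.Sprintf(`{
  "schemaVersion": 2,
  "mediaType": "application/vnd.oci.image.manifest.v1+json",
  "config": {"mediaType": "application/vnd.oci.image.config.v1+json", "size": 123, "digest": %q},
  "layers": []
}`, digest)

	m, err := v1.ParseManifest(strings.NewReader(raw))
	if err != nil {
		panic(err)
	}
	// Digest fields decode through Hash.UnmarshalJSON, so a malformed
	// digest would have failed ParseManifest.
	fmt.Println(m.SchemaVersion, m.Config.Digest.Hex[:8])
}
```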
-type Manifest struct { - SchemaVersion int64 `json:"schemaVersion"` - MediaType types.MediaType `json:"mediaType,omitempty"` - Config Descriptor `json:"config"` - Layers []Descriptor `json:"layers"` - Annotations map[string]string `json:"annotations,omitempty"` - Subject *Descriptor `json:"subject,omitempty"` -} - -// IndexManifest represents an OCI image index in a structured way. -type IndexManifest struct { - SchemaVersion int64 `json:"schemaVersion"` - MediaType types.MediaType `json:"mediaType,omitempty"` - Manifests []Descriptor `json:"manifests"` - Annotations map[string]string `json:"annotations,omitempty"` - Subject *Descriptor `json:"subject,omitempty"` -} - -// Descriptor holds a reference from the manifest to one of its constituent elements. -type Descriptor struct { - MediaType types.MediaType `json:"mediaType"` - Size int64 `json:"size"` - Digest Hash `json:"digest"` - Data []byte `json:"data,omitempty"` - URLs []string `json:"urls,omitempty"` - Annotations map[string]string `json:"annotations,omitempty"` - Platform *Platform `json:"platform,omitempty"` - ArtifactType string `json:"artifactType,omitempty"` -} - -// ParseManifest parses the io.Reader's contents into a Manifest. -func ParseManifest(r io.Reader) (*Manifest, error) { - m := Manifest{} - if err := json.NewDecoder(r).Decode(&m); err != nil { - return nil, err - } - return &m, nil -} - -// ParseIndexManifest parses the io.Reader's contents into an IndexManifest. -func ParseIndexManifest(r io.Reader) (*IndexManifest, error) { - im := IndexManifest{} - if err := json.NewDecoder(r).Decode(&im); err != nil { - return nil, err - } - return &im, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go b/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go deleted file mode 100644 index 98b1ff9094..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package match provides functionality for conveniently matching a v1.Descriptor. -package match - -import ( - v1 "github.com/google/go-containerregistry/pkg/v1" - imagespec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Matcher function that is given a v1.Descriptor, and returns whether or -// not it matches a given rule. Can match on anything it wants in the Descriptor. -type Matcher func(desc v1.Descriptor) bool - -// Name returns a match.Matcher that matches based on the value of the -// -// "org.opencontainers.image.ref.name" annotation: -// -// github.com/opencontainers/image-spec/blob/v1.0.1/annotations.md#pre-defined-annotation-keys -func Name(name string) Matcher { - return Annotation(imagespec.AnnotationRefName, name) -} - -// Annotation returns a match.Matcher that matches based on the provided annotation. 
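These matchers compose with the layout package's `ReplaceImage`/`RemoveDescriptors` seen earlier. A self-contained sketch of `Name` and `Platforms` against a hand-built descriptor (all values illustrative):

```go
package main

import (
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/match"
)

func main() {
	desc := v1.Descriptor{
		MediaType: "application/vnd.oci.image.manifest.v1+json",
		Platform:  &v1.Platform{OS: "linux", Architecture: "arm64"},
		Annotations: map[string]string{
			"org.opencontainers.image.ref.name": "v1.2.3",
		},
	}

	// match.Name is sugar for match.Annotation on the ref.name key.
	byName := match.Name("v1.2.3")
	byPlatform := match.Platforms(v1.Platform{OS: "linux", Architecture: "arm64"})

	fmt.Println(byName(desc))     // true
	fmt.Println(byPlatform(desc)) // true
}
```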
-func Annotation(key, value string) Matcher { - return func(desc v1.Descriptor) bool { - if desc.Annotations == nil { - return false - } - if aValue, ok := desc.Annotations[key]; ok && aValue == value { - return true - } - return false - } -} - -// Platforms returns a match.Matcher that matches on any one of the provided platforms. -// Ignores any descriptors that do not have a platform. -func Platforms(platforms ...v1.Platform) Matcher { - return func(desc v1.Descriptor) bool { - if desc.Platform == nil { - return false - } - for _, platform := range platforms { - if desc.Platform.Equals(platform) { - return true - } - } - return false - } -} - -// MediaTypes returns a match.Matcher that matches at least one of the provided media types. -func MediaTypes(mediaTypes ...string) Matcher { - mts := map[string]bool{} - for _, media := range mediaTypes { - mts[media] = true - } - return func(desc v1.Descriptor) bool { - if desc.MediaType == "" { - return false - } - if _, ok := mts[string(desc.MediaType)]; ok { - return true - } - return false - } -} - -// Digests returns a match.Matcher that matches at least one of the provided Digests -func Digests(digests ...v1.Hash) Matcher { - digs := map[v1.Hash]bool{} - for _, digest := range digests { - digs[digest] = true - } - return func(desc v1.Descriptor) bool { - _, ok := digs[desc.Digest] - return ok - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md deleted file mode 100644 index 19e1612433..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# `mutate` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate) - -The `v1.Image`, `v1.ImageIndex`, and `v1.Layer` interfaces provide only -accessor methods, so they are essentially immutable. If you want to change -something about them, you need to produce a new instance of that interface. - -A common use case for this library is to read an image from somewhere (a source), -change something about it, and write the image somewhere else (a sink). - -Graphically, this looks something like: - -
-[figure: an image is read from a source, passed through mutations, and written to a sink]
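In code, that source-to-mutation-to-sink flow looks roughly like the following sketch, using `empty.Image` as the source and a digest printout standing in for the sink (the entrypoint value is illustrative):

```go
package main

import (
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
)

func main() {
	// Source: an image FROM scratch.
	base := empty.Image

	// Mutation: returns a brand-new v1.Image; base is left untouched.
	img, err := mutate.Config(base, v1.Config{
		Entrypoint: []string{"/app"}, // illustrative entrypoint
	})
	if err != nil {
		panic(err)
	}

	// Sink: print the digest instead of pushing or saving anywhere.
	d, err := img.Digest()
	if err != nil {
		panic(err)
	}
	fmt.Println("mutated image:", d)
}
```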
- -## Mutations - -This is obviously not a comprehensive set of useful transformations (PRs welcome!), -but a rough summary of what the `mutate` package currently does: - -### `Config` and `ConfigFile` - -These allow you to change the [image configuration](https://github.com/opencontainers/image-spec/blob/master/config.md#properties), -e.g. to change the entrypoint, environment, author, etc. - -### `Time`, `Canonical`, and `CreatedAt` - -These are useful in the context of [reproducible builds](https://reproducible-builds.org/), -where you may want to strip timestamps and other non-reproducible information. - -### `Append`, `AppendLayers`, and `AppendManifests` - -These functions allow the extension of a `v1.Image` or `v1.ImageIndex` with -new layers or manifests. - -For constructing an image `FROM scratch`, see the [`empty`](/pkg/v1/empty) package. - -### `MediaType` and `IndexMediaType` - -Sometimes, it is necessary to change the media type of an image or index, -e.g. to appease a registry with strict validation of images (_looking at you, GCR_). - -### `Rebase` - -Rebase has [its own README](/cmd/crane/rebase.md). - -This is the underlying implementation of [`crane rebase`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_rebase.md). - -### `Extract` - -Extract will flatten an image filesystem into a single tar stream, -respecting whiteout files. - -This is the underlying implementation of [`crane export`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_export.md). diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go deleted file mode 100644 index dfbd9951e0..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package mutate provides facilities for mutating v1.Images of any kind. -package mutate diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go deleted file mode 100644 index 3ea27fe47f..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package mutate - -import ( - "bytes" - "encoding/json" - "errors" - "sync" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/stream" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -type image struct { - base v1.Image - adds []Addendum - - computed bool - configFile *v1.ConfigFile - manifest *v1.Manifest - annotations map[string]string - mediaType *types.MediaType - configMediaType *types.MediaType - diffIDMap map[v1.Hash]v1.Layer - digestMap map[v1.Hash]v1.Layer - subject *v1.Descriptor - - sync.Mutex -} - -var _ v1.Image = (*image)(nil) - -func (i *image) MediaType() (types.MediaType, error) { - if i.mediaType != nil { - return *i.mediaType, nil - } - return i.base.MediaType() -} - -func (i *image) compute() error { - i.Lock() - defer i.Unlock() - - // Don't re-compute if already computed. - if i.computed { - return nil - } - var configFile *v1.ConfigFile - if i.configFile != nil { - configFile = i.configFile - } else { - cf, err := i.base.ConfigFile() - if err != nil { - return err - } - configFile = cf.DeepCopy() - } - diffIDs := configFile.RootFS.DiffIDs - history := configFile.History - - diffIDMap := make(map[v1.Hash]v1.Layer) - digestMap := make(map[v1.Hash]v1.Layer) - - for _, add := range i.adds { - history = append(history, add.History) - if add.Layer != nil { - diffID, err := add.Layer.DiffID() - if err != nil { - return err - } - diffIDs = append(diffIDs, diffID) - diffIDMap[diffID] = add.Layer - } - } - - m, err := i.base.Manifest() - if err != nil { - return err - } - manifest := m.DeepCopy() - manifestLayers := manifest.Layers - for _, add := range i.adds { - if add.Layer == nil { - // Empty layers include only history in manifest. - continue - } - - desc, err := partial.Descriptor(add.Layer) - if err != nil { - return err - } - - // Fields in the addendum override the original descriptor. - if len(add.Annotations) != 0 { - desc.Annotations = add.Annotations - } - if len(add.URLs) != 0 { - desc.URLs = add.URLs - } - - if add.MediaType != "" { - desc.MediaType = add.MediaType - } - - manifestLayers = append(manifestLayers, *desc) - digestMap[desc.Digest] = add.Layer - } - - configFile.RootFS.DiffIDs = diffIDs - configFile.History = history - - manifest.Layers = manifestLayers - - rcfg, err := json.Marshal(configFile) - if err != nil { - return err - } - d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg)) - if err != nil { - return err - } - manifest.Config.Digest = d - manifest.Config.Size = sz - - // If Data was set in the base image, we need to update it in the mutated image. - if m.Config.Data != nil { - manifest.Config.Data = rcfg - } - - // If the user wants to mutate the media type of the config - if i.configMediaType != nil { - manifest.Config.MediaType = *i.configMediaType - } - - if i.mediaType != nil { - manifest.MediaType = *i.mediaType - } - - if i.annotations != nil { - if manifest.Annotations == nil { - manifest.Annotations = map[string]string{} - } - - for k, v := range i.annotations { - manifest.Annotations[k] = v - } - } - manifest.Subject = i.subject - - i.configFile = configFile - i.manifest = manifest - i.diffIDMap = diffIDMap - i.digestMap = digestMap - i.computed = true - return nil -} - -// Layers returns the ordered collection of filesystem layers that comprise this image. 
-// The order of the list is oldest/base layer first, and most-recent/top layer last. -func (i *image) Layers() ([]v1.Layer, error) { - if err := i.compute(); errors.Is(err, stream.ErrNotComputed) { - // Image contains a streamable layer which has not yet been - // consumed. Just return the layers we have in case the caller - // is going to consume the layers. - layers, err := i.base.Layers() - if err != nil { - return nil, err - } - for _, add := range i.adds { - layers = append(layers, add.Layer) - } - return layers, nil - } else if err != nil { - return nil, err - } - - diffIDs, err := partial.DiffIDs(i) - if err != nil { - return nil, err - } - ls := make([]v1.Layer, 0, len(diffIDs)) - for _, h := range diffIDs { - l, err := i.LayerByDiffID(h) - if err != nil { - return nil, err - } - ls = append(ls, l) - } - return ls, nil -} - -// ConfigName returns the hash of the image's config file. -func (i *image) ConfigName() (v1.Hash, error) { - if err := i.compute(); err != nil { - return v1.Hash{}, err - } - return partial.ConfigName(i) -} - -// ConfigFile returns this image's config file. -func (i *image) ConfigFile() (*v1.ConfigFile, error) { - if err := i.compute(); err != nil { - return nil, err - } - return i.configFile.DeepCopy(), nil -} - -// RawConfigFile returns the serialized bytes of ConfigFile() -func (i *image) RawConfigFile() ([]byte, error) { - if err := i.compute(); err != nil { - return nil, err - } - return json.Marshal(i.configFile) -} - -// Digest returns the sha256 of this image's manifest. -func (i *image) Digest() (v1.Hash, error) { - if err := i.compute(); err != nil { - return v1.Hash{}, err - } - return partial.Digest(i) -} - -// Size implements v1.Image. -func (i *image) Size() (int64, error) { - if err := i.compute(); err != nil { - return -1, err - } - return partial.Size(i) -} - -// Manifest returns this image's Manifest object. -func (i *image) Manifest() (*v1.Manifest, error) { - if err := i.compute(); err != nil { - return nil, err - } - return i.manifest.DeepCopy(), nil -} - -// RawManifest returns the serialized bytes of Manifest() -func (i *image) RawManifest() ([]byte, error) { - if err := i.compute(); err != nil { - return nil, err - } - return json.Marshal(i.manifest) -} - -// LayerByDigest returns a Layer for interacting with a particular layer of -// the image, looking it up by "digest" (the compressed hash). -func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) { - if cn, err := i.ConfigName(); err != nil { - return nil, err - } else if h == cn { - return partial.ConfigLayer(i) - } - if layer, ok := i.digestMap[h]; ok { - return layer, nil - } - return i.base.LayerByDigest(h) -} - -// LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" -// (the uncompressed hash). -func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { - if layer, ok := i.diffIDMap[h]; ok { - return layer, nil - } - return i.base.LayerByDiffID(h) -} - -func validate(adds []Addendum) error { - for _, add := range adds { - if add.Layer == nil && !add.History.EmptyLayer { - return errors.New("unable to add a nil layer to the image") - } - } - return nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go deleted file mode 100644 index 512effef67..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mutate - -import ( - "encoding/json" - "errors" - "fmt" - "sync" - - "github.com/google/go-containerregistry/pkg/logs" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/match" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/stream" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -func computeDescriptor(ia IndexAddendum) (*v1.Descriptor, error) { - desc, err := partial.Descriptor(ia.Add) - if err != nil { - return nil, err - } - - // The IndexAddendum allows overriding Descriptor values. - if ia.Descriptor.Size != 0 { - desc.Size = ia.Descriptor.Size - } - if string(ia.Descriptor.MediaType) != "" { - desc.MediaType = ia.Descriptor.MediaType - } - if ia.Descriptor.Digest != (v1.Hash{}) { - desc.Digest = ia.Descriptor.Digest - } - if ia.Descriptor.Platform != nil { - desc.Platform = ia.Descriptor.Platform - } - if len(ia.Descriptor.URLs) != 0 { - desc.URLs = ia.Descriptor.URLs - } - if len(ia.Descriptor.Annotations) != 0 { - desc.Annotations = ia.Descriptor.Annotations - } - if ia.Descriptor.Data != nil { - desc.Data = ia.Descriptor.Data - } - - return desc, nil -} - -type index struct { - base v1.ImageIndex - adds []IndexAddendum - // remove is removed before adds - remove match.Matcher - - computed bool - manifest *v1.IndexManifest - annotations map[string]string - mediaType *types.MediaType - imageMap map[v1.Hash]v1.Image - indexMap map[v1.Hash]v1.ImageIndex - layerMap map[v1.Hash]v1.Layer - subject *v1.Descriptor - - sync.Mutex -} - -var _ v1.ImageIndex = (*index)(nil) - -func (i *index) MediaType() (types.MediaType, error) { - if i.mediaType != nil { - return *i.mediaType, nil - } - return i.base.MediaType() -} - -func (i *index) Size() (int64, error) { return partial.Size(i) } - -func (i *index) compute() error { - i.Lock() - defer i.Unlock() - - // Don't re-compute if already computed. 
- if i.computed { - return nil - } - - i.imageMap = make(map[v1.Hash]v1.Image) - i.indexMap = make(map[v1.Hash]v1.ImageIndex) - i.layerMap = make(map[v1.Hash]v1.Layer) - - m, err := i.base.IndexManifest() - if err != nil { - return err - } - manifest := m.DeepCopy() - manifests := manifest.Manifests - - if i.remove != nil { - var cleanedManifests []v1.Descriptor - for _, m := range manifests { - if !i.remove(m) { - cleanedManifests = append(cleanedManifests, m) - } - } - manifests = cleanedManifests - } - - for _, add := range i.adds { - desc, err := computeDescriptor(add) - if err != nil { - return err - } - - manifests = append(manifests, *desc) - if idx, ok := add.Add.(v1.ImageIndex); ok { - i.indexMap[desc.Digest] = idx - } else if img, ok := add.Add.(v1.Image); ok { - i.imageMap[desc.Digest] = img - } else if l, ok := add.Add.(v1.Layer); ok { - i.layerMap[desc.Digest] = l - } else { - logs.Warn.Printf("Unexpected index addendum: %T", add.Add) - } - } - - manifest.Manifests = manifests - - if i.mediaType != nil { - manifest.MediaType = *i.mediaType - } - - if i.annotations != nil { - if manifest.Annotations == nil { - manifest.Annotations = map[string]string{} - } - for k, v := range i.annotations { - manifest.Annotations[k] = v - } - } - manifest.Subject = i.subject - - i.manifest = manifest - i.computed = true - return nil -} - -func (i *index) Image(h v1.Hash) (v1.Image, error) { - if img, ok := i.imageMap[h]; ok { - return img, nil - } - return i.base.Image(h) -} - -func (i *index) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { - if idx, ok := i.indexMap[h]; ok { - return idx, nil - } - return i.base.ImageIndex(h) -} - -type withLayer interface { - Layer(v1.Hash) (v1.Layer, error) -} - -// Workaround for #819. -func (i *index) Layer(h v1.Hash) (v1.Layer, error) { - if layer, ok := i.layerMap[h]; ok { - return layer, nil - } - if wl, ok := i.base.(withLayer); ok { - return wl.Layer(h) - } - return nil, fmt.Errorf("layer not found: %s", h) -} - -// Digest returns the sha256 of this index's manifest. -func (i *index) Digest() (v1.Hash, error) { - if err := i.compute(); err != nil { - return v1.Hash{}, err - } - return partial.Digest(i) -} - -// IndexManifest returns this index's manifest object. -func (i *index) IndexManifest() (*v1.IndexManifest, error) { - if err := i.compute(); err != nil { - return nil, err - } - return i.manifest.DeepCopy(), nil -} - -// RawManifest returns the serialized bytes of IndexManifest() -func (i *index) RawManifest() ([]byte, error) { - if err := i.compute(); err != nil { - return nil, err - } - return json.Marshal(i.manifest) -} - -func (i *index) Manifests() ([]partial.Describable, error) { - if err := i.compute(); errors.Is(err, stream.ErrNotComputed) { - // Index contains a streamable layer which has not yet been - // consumed. Just return the manifests we have in case the caller - // is going to consume the streamable layers.
- manifests, err := partial.Manifests(i.base) - if err != nil { - return nil, err - } - for _, add := range i.adds { - manifests = append(manifests, add.Add) - } - return manifests, nil - } else if err != nil { - return nil, err - } - - return partial.ComputeManifests(i) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go deleted file mode 100644 index 1a24b10d76..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go +++ /dev/null @@ -1,555 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mutate - -import ( - "archive/tar" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "path/filepath" - "strings" - "time" - - "github.com/google/go-containerregistry/internal/gzip" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/empty" - "github.com/google/go-containerregistry/pkg/v1/match" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/tarball" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -const whiteoutPrefix = ".wh." - -// Addendum contains layers and history to be appended -// to a base image -type Addendum struct { - Layer v1.Layer - History v1.History - URLs []string - Annotations map[string]string - MediaType types.MediaType -} - -// AppendLayers applies layers to a base image. -func AppendLayers(base v1.Image, layers ...v1.Layer) (v1.Image, error) { - additions := make([]Addendum, 0, len(layers)) - for _, layer := range layers { - additions = append(additions, Addendum{Layer: layer}) - } - - return Append(base, additions...) -} - -// Append will apply the list of addendums to the base image -func Append(base v1.Image, adds ...Addendum) (v1.Image, error) { - if len(adds) == 0 { - return base, nil - } - if err := validate(adds); err != nil { - return nil, err - } - - return &image{ - base: base, - adds: adds, - }, nil -} - -// Appendable is an interface that represents something that can be appended -// to an ImageIndex. We need to be able to construct a v1.Descriptor in order -// to append something, and this is the minimum required information for that. -type Appendable interface { - MediaType() (types.MediaType, error) - Digest() (v1.Hash, error) - Size() (int64, error) -} - -// IndexAddendum represents an appendable thing and all the properties that -// we may want to override in the resulting v1.Descriptor. -type IndexAddendum struct { - Add Appendable - v1.Descriptor -} - -// AppendManifests appends manifests to the ImageIndex. -func AppendManifests(base v1.ImageIndex, adds ...IndexAddendum) v1.ImageIndex { - return &index{ - base: base, - adds: adds, - } -}
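For orientation, a small usage sketch of the appenders above; the layer tarball path and the platform override here are illustrative assumptions, not anything this file prescribes:

```go
package example

import (
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
	"github.com/google/go-containerregistry/pkg/v1/tarball"
)

// buildIndex appends one layer to the empty base image, then wraps the
// result in a single-entry index whose descriptor carries a platform.
func buildIndex() (v1.ImageIndex, error) {
	layer, err := tarball.LayerFromFile("layer.tar.gz") // hypothetical path
	if err != nil {
		return nil, err
	}
	img, err := mutate.AppendLayers(empty.Image, layer)
	if err != nil {
		return nil, err
	}
	idx := mutate.AppendManifests(empty.Index, mutate.IndexAddendum{
		Add: img,
		// IndexAddendum embeds a v1.Descriptor; non-zero fields override
		// the values computed from the appended image.
		Descriptor: v1.Descriptor{
			Platform: &v1.Platform{OS: "linux", Architecture: "amd64"},
		},
	})
	return idx, nil
}
```

- -// RemoveManifests removes any descriptors that match the match.Matcher.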
-func RemoveManifests(base v1.ImageIndex, matcher match.Matcher) v1.ImageIndex { - return &index{ - base: base, - remove: matcher, - } -} - -// Config mutates the provided v1.Image to have the provided v1.Config -func Config(base v1.Image, cfg v1.Config) (v1.Image, error) { - cf, err := base.ConfigFile() - if err != nil { - return nil, err - } - - cf.Config = cfg - - return ConfigFile(base, cf) -} - -// Subject mutates the subject on an image or index manifest. -// -// The input is expected to be a v1.Image or a v1.ImageIndex, and the result -// is the same type. You can type-assert the result like so: -// -// img := Subject(empty.Image, subj).(v1.Image) -// -// Or for an index: -// -// idx := Subject(empty.Index, subj).(v1.ImageIndex) -// -// If the input is not an Image or ImageIndex, the result will lazily set the -// subject on the raw manifest. -func Subject(f partial.WithRawManifest, subject v1.Descriptor) partial.WithRawManifest { - if img, ok := f.(v1.Image); ok { - return &image{ - base: img, - subject: &subject, - } - } - if idx, ok := f.(v1.ImageIndex); ok { - return &index{ - base: idx, - subject: &subject, - } - } - return arbitraryRawManifest{a: f, subject: &subject} -} - -// Annotations mutates the annotations on an annotatable image or index manifest. -// -// The annotatable input is expected to be a v1.Image or a v1.ImageIndex, and -// the result is the same type. You can type-assert the result like so: -// -// img := Annotations(empty.Image, map[string]string{ -// "foo": "bar", -// }).(v1.Image) -// -// Or for an index: -// -// idx := Annotations(empty.Index, map[string]string{ -// "foo": "bar", -// }).(v1.ImageIndex) -// -// If the input Annotatable is not an Image or ImageIndex, the result will -// attempt to lazily annotate the raw manifest. -func Annotations(f partial.WithRawManifest, anns map[string]string) partial.WithRawManifest { - if img, ok := f.(v1.Image); ok { - return &image{ - base: img, - annotations: anns, - } - } - if idx, ok := f.(v1.ImageIndex); ok { - return &index{ - base: idx, - annotations: anns, - } - } - return arbitraryRawManifest{a: f, anns: anns} -} - -type arbitraryRawManifest struct { - a partial.WithRawManifest - anns map[string]string - subject *v1.Descriptor -} - -func (a arbitraryRawManifest) RawManifest() ([]byte, error) { - b, err := a.a.RawManifest() - if err != nil { - return nil, err - } - var m map[string]any - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - if ann, ok := m["annotations"]; ok { - // json.Unmarshal decodes JSON objects into map[string]any, so that is - // the type to assert here. - if annm, ok := ann.(map[string]any); ok { - for k, v := range a.anns { - annm[k] = v - } - } else { - return nil, fmt.Errorf(".annotations is not a map: %T", ann) - } - } else { - m["annotations"] = a.anns - } - if a.subject != nil { - m["subject"] = a.subject - } - return json.Marshal(m) -} - -// ConfigFile mutates the provided v1.Image to have the provided v1.ConfigFile -func ConfigFile(base v1.Image, cfg *v1.ConfigFile) (v1.Image, error) { - m, err := base.Manifest() - if err != nil { - return nil, err - } - - image := &image{ - base: base, - manifest: m.DeepCopy(), - configFile: cfg, - } - - return image, nil -} - -// CreatedAt mutates the provided v1.Image to have the provided v1.Time -func CreatedAt(base v1.Image, created v1.Time) (v1.Image, error) { - cf, err := base.ConfigFile() - if err != nil { - return nil, err - } - - cfg := cf.DeepCopy() - cfg.Created = created - - return ConfigFile(base, cfg) -} - -// Extract takes an image and returns an io.ReadCloser containing the image's -// flattened filesystem. -// -// Callers can read the filesystem contents by passing the reader to -// tar.NewReader, or io.Copy it directly to some output. -// -// If a caller doesn't read the full contents, they should Close it to free up -// resources used during extraction. -func Extract(img v1.Image) io.ReadCloser { - pr, pw := io.Pipe() - - go func() { - // Close the writer with any errors encountered during - // extraction. These errors will be returned by the reader end - // on subsequent reads. If err == nil, the reader will return - // EOF. - pw.CloseWithError(extract(img, pw)) - }() - - return pr -} - -// Adapted from https://github.com/google/containerregistry/blob/da03b395ccdc4e149e34fbb540483efce962dc64/client/v2_2/docker_image_.py#L816 -func extract(img v1.Image, w io.Writer) error { - tarWriter := tar.NewWriter(w) - defer tarWriter.Close() - - fileMap := map[string]bool{} - - layers, err := img.Layers() - if err != nil { - return fmt.Errorf("retrieving image layers: %w", err) - } - - // we iterate through the layers in reverse order because it makes handling - // whiteout layers more efficient, since we can just keep track of the removed - // files as we see .wh. layers and ignore those in previous layers. - for i := len(layers) - 1; i >= 0; i-- { - layer := layers[i] - layerReader, err := layer.Uncompressed() - if err != nil { - return fmt.Errorf("reading layer contents: %w", err) - } - defer layerReader.Close() - tarReader := tar.NewReader(layerReader) - for { - header, err := tarReader.Next() - if errors.Is(err, io.EOF) { - break - } - if err != nil { - return fmt.Errorf("reading tar: %w", err) - } - - // Some tools prepend everything with "./", so if we don't Clean the - // name, we may have duplicate entries, which angers tar-split. - header.Name = filepath.Clean(header.Name) - // force PAX format to remove Name/Linkname length limit of 100 characters - // required by USTAR and to not depend on internal tar package guess which - // prefers USTAR over PAX - header.Format = tar.FormatPAX - - basename := filepath.Base(header.Name) - dirname := filepath.Dir(header.Name) - tombstone := strings.HasPrefix(basename, whiteoutPrefix) - if tombstone { - basename = basename[len(whiteoutPrefix):] - } - - // check if we have seen this name before - // if we're checking a directory, don't filepath.Join names - var name string - if header.Typeflag == tar.TypeDir { - name = header.Name - } else { - name = filepath.Join(dirname, basename) - } - - if _, ok := fileMap[name]; ok { - continue - } - - // check for a whited out parent directory - if inWhiteoutDir(fileMap, name) { - continue - } - - // mark file as handled. non-directory implicitly tombstones - // any entries with a matching (or child) name - fileMap[name] = tombstone || header.Typeflag != tar.TypeDir - if !tombstone { - if err := tarWriter.WriteHeader(header); err != nil { - return err - } - if header.Size > 0 { - if _, err := io.CopyN(tarWriter, tarReader, header.Size); err != nil { - return err - } - } - } - } - } - return nil -} - -func inWhiteoutDir(fileMap map[string]bool, file string) bool { - for { - if file == "" { - break - } - dirname := filepath.Dir(file) - if file == dirname { - break - } - if val, ok := fileMap[dirname]; ok && val { - return true - } - file = dirname - } - return false -} - -func max(a, b int) int { - if a > b { - return a - } - return b -}
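A consumption sketch for Extract, following its doc comment; the image value is assumed to come from elsewhere (e.g. a tarball or remote image):

```go
package example

import (
	"archive/tar"
	"errors"
	"fmt"
	"io"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
)

// listFiles walks the flattened filesystem of img and prints each entry,
// closing the reader (via defer) even if we bail out early on an error.
func listFiles(img v1.Image) error {
	rc := mutate.Extract(img)
	defer rc.Close()

	tr := tar.NewReader(rc)
	for {
		hdr, err := tr.Next()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(hdr.Name)
	}
}
```

- -// Time sets all timestamps in an image to the given timestamp.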
-func Time(img v1.Image, t time.Time) (v1.Image, error) { - newImage := empty.Image - - layers, err := img.Layers() - if err != nil { - return nil, fmt.Errorf("getting image layers: %w", err) - } - - ocf, err := img.ConfigFile() - if err != nil { - return nil, fmt.Errorf("getting original config file: %w", err) - } - - addendums := make([]Addendum, max(len(ocf.History), len(layers))) - var historyIdx, addendumIdx int - for layerIdx := 0; layerIdx < len(layers); addendumIdx, layerIdx = addendumIdx+1, layerIdx+1 { - newLayer, err := layerTime(layers[layerIdx], t) - if err != nil { - return nil, fmt.Errorf("setting layer times: %w", err) - } - - // try to search for the history entry that corresponds to this layer - for ; historyIdx < len(ocf.History); historyIdx++ { - addendums[addendumIdx].History = ocf.History[historyIdx] - // if it's an EmptyLayer, do not set the Layer and have the Addendum with just the History - // and move on to the next History entry - if ocf.History[historyIdx].EmptyLayer { - addendumIdx++ - continue - } - // otherwise, we can exit from the cycle - historyIdx++ - break - } - if addendumIdx < len(addendums) { - addendums[addendumIdx].Layer = newLayer - } - } - - // add all leftover History entries - for ; historyIdx < len(ocf.History); historyIdx, addendumIdx = historyIdx+1, addendumIdx+1 { - addendums[addendumIdx].History = ocf.History[historyIdx] - } - - newImage, err = Append(newImage, addendums...) - if err != nil { - return nil, fmt.Errorf("appending layers: %w", err) - } - - cf, err := newImage.ConfigFile() - if err != nil { - return nil, fmt.Errorf("setting config file: %w", err) - } - - cfg := cf.DeepCopy() - - // Copy basic config over - cfg.Architecture = ocf.Architecture - cfg.OS = ocf.OS - cfg.OSVersion = ocf.OSVersion - cfg.Config = ocf.Config - - // Strip away timestamps from the config file - cfg.Created = v1.Time{Time: t} - - for i, h := range cfg.History { - h.Created = v1.Time{Time: t} - h.CreatedBy = ocf.History[i].CreatedBy - h.Comment = ocf.History[i].Comment - h.EmptyLayer = ocf.History[i].EmptyLayer - // Explicitly ignore Author field; which hinders reproducibility - h.Author = "" - cfg.History[i] = h - } - - return ConfigFile(newImage, cfg) -} - -func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) { - layerReader, err := layer.Uncompressed() - if err != nil { - return nil, fmt.Errorf("getting layer: %w", err) - } - defer layerReader.Close() - w := new(bytes.Buffer) - tarWriter := tar.NewWriter(w) - defer tarWriter.Close() - - tarReader := tar.NewReader(layerReader) - for { - header, err := tarReader.Next() - if errors.Is(err, io.EOF) { - break - } - if err != nil { - return nil, fmt.Errorf("reading layer: %w", err) - } - - header.ModTime = t - - //PAX and GNU Format support additional timestamps in the header - if header.Format == tar.FormatPAX || header.Format == tar.FormatGNU { - header.AccessTime = t - header.ChangeTime = t - } - - if err := tarWriter.WriteHeader(header); err != nil { - return nil, fmt.Errorf("writing tar header: %w", err) - } - - if header.Typeflag == tar.TypeReg { - // TODO(#1168): This should be lazy, and not buffer the entire layer contents. 
- if _, err = io.CopyN(tarWriter, tarReader, header.Size); err != nil { - return nil, fmt.Errorf("writing layer file: %w", err) - } - } - } - - if err := tarWriter.Close(); err != nil { - return nil, err - } - - b := w.Bytes() - // gzip the contents, then create the layer - opener := func() (io.ReadCloser, error) { - return gzip.ReadCloser(io.NopCloser(bytes.NewReader(b))), nil - } - layer, err = tarball.LayerFromOpener(opener) - if err != nil { - return nil, fmt.Errorf("creating layer: %w", err) - } - - return layer, nil -} - -// Canonical is a helper function to combine Time and configFile -// to remove any randomness during a docker build. -func Canonical(img v1.Image) (v1.Image, error) { - // Set all timestamps to 0 - created := time.Time{} - img, err := Time(img, created) - if err != nil { - return nil, err - } - - cf, err := img.ConfigFile() - if err != nil { - return nil, err - } - - // Get rid of host-dependent random config - cfg := cf.DeepCopy() - - cfg.Container = "" - cfg.Config.Hostname = "" - cfg.DockerVersion = "" - - return ConfigFile(img, cfg) -} - -// MediaType modifies the MediaType() of the given image. -func MediaType(img v1.Image, mt types.MediaType) v1.Image { - return &image{ - base: img, - mediaType: &mt, - } -} - -// ConfigMediaType modifies the MediaType() of the given image's Config. -// -// If !mt.IsConfig(), this will be the image's artifactType in any indexes it's a part of. -func ConfigMediaType(img v1.Image, mt types.MediaType) v1.Image { - return &image{ - base: img, - configMediaType: &mt, - } -} - -// IndexMediaType modifies the MediaType() of the given index. -func IndexMediaType(idx v1.ImageIndex, mt types.MediaType) v1.ImageIndex { - return &index{ - base: idx, - mediaType: &mt, - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go deleted file mode 100644 index c606e0b76e..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mutate - -import ( - "fmt" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/empty" -) - -// Rebase returns a new v1.Image where the oldBase in orig is replaced by newBase. -func Rebase(orig, oldBase, newBase v1.Image) (v1.Image, error) { - // Verify that oldBase's layers are present in orig, otherwise orig is - // not based on oldBase at all. 
- origLayers, err := orig.Layers() - if err != nil { - return nil, fmt.Errorf("failed to get layers for original: %w", err) - } - oldBaseLayers, err := oldBase.Layers() - if err != nil { - return nil, err - } - if len(oldBaseLayers) > len(origLayers) { - return nil, fmt.Errorf("image %q is not based on %q (too few layers)", orig, oldBase) - } - for i, l := range oldBaseLayers { - oldLayerDigest, err := l.Digest() - if err != nil { - return nil, fmt.Errorf("failed to get digest of layer %d of %q: %w", i, oldBase, err) - } - origLayerDigest, err := origLayers[i].Digest() - if err != nil { - return nil, fmt.Errorf("failed to get digest of layer %d of %q: %w", i, orig, err) - } - if oldLayerDigest != origLayerDigest { - return nil, fmt.Errorf("image %q is not based on %q (layer %d mismatch)", orig, oldBase, i) - } - } - - oldConfig, err := oldBase.ConfigFile() - if err != nil { - return nil, fmt.Errorf("failed to get config for old base: %w", err) - } - - origConfig, err := orig.ConfigFile() - if err != nil { - return nil, fmt.Errorf("failed to get config for original: %w", err) - } - - newConfig, err := newBase.ConfigFile() - if err != nil { - return nil, fmt.Errorf("could not get config for new base: %w", err) - } - - // Stitch together an image that contains: - // - original image's config - // - new base image's os/arch properties - // - new base image's layers + top of original image's layers - // - new base image's history + top of original image's history - rebasedImage, err := Config(empty.Image, *origConfig.Config.DeepCopy()) - if err != nil { - return nil, fmt.Errorf("failed to create empty image with original config: %w", err) - } - - // Add new config properties from existing images. - rebasedConfig, err := rebasedImage.ConfigFile() - if err != nil { - return nil, fmt.Errorf("could not get config for rebased image: %w", err) - } - // OS/Arch properties from new base - rebasedConfig.Architecture = newConfig.Architecture - rebasedConfig.OS = newConfig.OS - rebasedConfig.OSVersion = newConfig.OSVersion - - // Apply config properties to rebased. - rebasedImage, err = ConfigFile(rebasedImage, rebasedConfig) - if err != nil { - return nil, fmt.Errorf("failed to replace config for rebased image: %w", err) - } - - // Get new base layers and config for history. - newBaseLayers, err := newBase.Layers() - if err != nil { - return nil, fmt.Errorf("could not get new base layers for new base: %w", err) - } - // Add new base layers. - rebasedImage, err = Append(rebasedImage, createAddendums(0, 0, newConfig.History, newBaseLayers)...) - if err != nil { - return nil, fmt.Errorf("failed to append new base image: %w", err) - } - - // Add original layers above the old base. - rebasedImage, err = Append(rebasedImage, createAddendums(len(oldConfig.History), len(oldBaseLayers)+1, origConfig.History, origLayers)...) - if err != nil { - return nil, fmt.Errorf("failed to append original image: %w", err) - } - - return rebasedImage, nil -} - -// createAddendums makes a list of addendums from a history and layers starting from a specific history and layer -// indexes. -func createAddendums(startHistory, startLayer int, history []v1.History, layers []v1.Layer) []Addendum { - var adds []Addendum - // History should be a superset of layers; empty layers (e.g. ENV statements) only exist in history. - // They cannot be iterated identically but must be walked independently, only advancing the iterator for layers - // when a history entry for a non-empty layer is seen. 
- layerIndex := 0 - for historyIndex := range history { - var layer v1.Layer - emptyLayer := history[historyIndex].EmptyLayer - if !emptyLayer { - layer = layers[layerIndex] - layerIndex++ - } - if historyIndex >= startHistory || layerIndex >= startLayer { - adds = append(adds, Addendum{ - Layer: layer, - History: history[historyIndex], - }) - } - } - // In the event history was malformed or non-existent, append the remaining layers. - for i := layerIndex; i < len(layers); i++ { - if i >= startLayer { - adds = append(adds, Addendum{Layer: layers[i]}) - } - } - - return adds -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md deleted file mode 100644 index 53ebbc6ccf..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# `partial` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial) - -## Partial Implementations - -There are roughly two kinds of image representations: compressed and uncompressed. - -The implementations for these kinds of images are almost identical, with the only -major difference being how blobs (config and layers) are fetched. This common -code lives in this package, where you provide a _partial_ implementation of a -compressed or uncompressed image, and you get back a full `v1.Image` implementation. - -### Examples - -In a registry, blobs are compressed, so it's easiest to implement a `v1.Image` in terms -of compressed layers. `remote.remoteImage` does this by implementing `CompressedImageCore`: - -```go -type CompressedImageCore interface { - RawConfigFile() ([]byte, error) - MediaType() (types.MediaType, error) - RawManifest() ([]byte, error) - LayerByDigest(v1.Hash) (CompressedLayer, error) -} -``` - -In a tarball, blobs are (often) uncompressed, so it's easiest to implement a `v1.Image` in terms -of uncompressed layers. `tarball.uncompressedImage` does this by implementing `UncompressedImageCore`: - -```go -type UncompressedImageCore interface { - RawConfigFile() ([]byte, error) - MediaType() (types.MediaType, error) - LayerByDiffID(v1.Hash) (UncompressedLayer, error) -} -``` - -## Optional Methods - -Where possible, we access some information via optional methods as an optimization. - -### [`partial.Descriptor`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Descriptor) - -There are some properties of a [`Descriptor`](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) that aren't derivable from just image data: - -* `MediaType` -* `Platform` -* `URLs` -* `Annotations` - -For example, in a `tarball.Image`, there is a `LayerSources` field that contains -an entire layer descriptor with `URLs` information for foreign layers. This -information can be passed through to callers by implementing this optional -`Descriptor` method. - -See [`#654`](https://github.com/google/go-containerregistry/pull/654).
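A sketch of how a layer can surface its own descriptor through this optional method; the wrapper type below is illustrative, not part of the package:

```go
// staticDescriptorLayer carries a complete descriptor (e.g. with URLs for a
// foreign layer) and hands it back through the optional Descriptor method,
// so partial.Descriptor returns it verbatim instead of recomputing one from
// Digest/Size/MediaType.
type staticDescriptorLayer struct {
	v1.Layer
	desc v1.Descriptor
}

func (l staticDescriptorLayer) Descriptor() (*v1.Descriptor, error) {
	return &l.desc, nil
}
```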
- -### [`partial.UncompressedSize`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#UncompressedSize) - -Usually, you don't need to know the uncompressed size of a layer, since that -information isn't stored in a config file (just the sha256 is needed); however, -there are cases where it is very helpful to know the layer size, e.g. when -writing the uncompressed layer into a tarball. - -See [`#655`](https://github.com/google/go-containerregistry/pull/655). - -### [`partial.Exists`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Exists) - -We generally don't care about the existence of something as granular as a -layer, and would rather ensure all the invariants of an image are upheld via -the `validate` package. However, there are situations where we want to do a -quick smoke test to ensure that the underlying storage engine hasn't been -corrupted by something, e.g. deleting files or blobs. Thus, we've exposed an -optional `Exists` method that does an existence check without actually reading -any bytes. - -The `remote` package implements this via `HEAD` requests. - -The `layout` package implements this via `os.Stat`. - -See [`#838`](https://github.com/google/go-containerregistry/pull/838). diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go deleted file mode 100644 index 44989ac968..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package partial - -import ( - "io" - - "github.com/google/go-containerregistry/internal/and" - "github.com/google/go-containerregistry/internal/compression" - "github.com/google/go-containerregistry/internal/gzip" - "github.com/google/go-containerregistry/internal/zstd" - comp "github.com/google/go-containerregistry/pkg/compression" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// CompressedLayer represents the bare minimum interface a natively -// compressed layer must implement for us to produce a v1.Layer -type CompressedLayer interface { - // Digest returns the Hash of the compressed layer. - Digest() (v1.Hash, error) - - // Compressed returns an io.ReadCloser for the compressed layer contents. - Compressed() (io.ReadCloser, error) - - // Size returns the compressed size of the Layer. - Size() (int64, error) - - // Returns the mediaType for the compressed Layer - MediaType() (types.MediaType, error) -} - -// compressedLayerExtender implements v1.Layer using the compressed base properties.
-type compressedLayerExtender struct { - CompressedLayer -} - -// Uncompressed implements v1.Layer -func (cle *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) { - rc, err := cle.Compressed() - if err != nil { - return nil, err - } - - // Often, the "compressed" bytes are not actually-compressed. - // Peek at the first two bytes to determine whether it's correct to - // wrap this with gzip.UnzipReadCloser or zstd.UnzipReadCloser. - cp, pr, err := compression.PeekCompression(rc) - if err != nil { - return nil, err - } - - prc := &and.ReadCloser{ - Reader: pr, - CloseFunc: rc.Close, - } - - switch cp { - case comp.GZip: - return gzip.UnzipReadCloser(prc) - case comp.ZStd: - return zstd.UnzipReadCloser(prc) - default: - return prc, nil - } -} - -// DiffID implements v1.Layer -func (cle *compressedLayerExtender) DiffID() (v1.Hash, error) { - // If our nested CompressedLayer implements DiffID, - // then delegate to it instead. - if wdi, ok := cle.CompressedLayer.(WithDiffID); ok { - return wdi.DiffID() - } - r, err := cle.Uncompressed() - if err != nil { - return v1.Hash{}, err - } - defer r.Close() - h, _, err := v1.SHA256(r) - return h, err -} - -// CompressedToLayer fills in the missing methods from a CompressedLayer so that it implements v1.Layer -func CompressedToLayer(cl CompressedLayer) (v1.Layer, error) { - return &compressedLayerExtender{cl}, nil -} - -// CompressedImageCore represents the bare minimum interface a natively -// compressed image must implement for us to produce a v1.Image. -type CompressedImageCore interface { - ImageCore - - // RawManifest returns the serialized bytes of the manifest. - RawManifest() ([]byte, error) - - // LayerByDigest is a variation on the v1.Image method, which returns - // a CompressedLayer instead. - LayerByDigest(v1.Hash) (CompressedLayer, error) -} - -// compressedImageExtender implements v1.Image by extending CompressedImageCore with the -// appropriate methods computed from the minimal core.
-type compressedImageExtender struct { - CompressedImageCore -} - -// Assert that our extender type completes the v1.Image interface -var _ v1.Image = (*compressedImageExtender)(nil) - -// Digest implements v1.Image -func (i *compressedImageExtender) Digest() (v1.Hash, error) { - return Digest(i) -} - -// ConfigName implements v1.Image -func (i *compressedImageExtender) ConfigName() (v1.Hash, error) { - return ConfigName(i) -} - -// Layers implements v1.Image -func (i *compressedImageExtender) Layers() ([]v1.Layer, error) { - hs, err := FSLayers(i) - if err != nil { - return nil, err - } - ls := make([]v1.Layer, 0, len(hs)) - for _, h := range hs { - l, err := i.LayerByDigest(h) - if err != nil { - return nil, err - } - ls = append(ls, l) - } - return ls, nil -} - -// LayerByDigest implements v1.Image -func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { - cl, err := i.CompressedImageCore.LayerByDigest(h) - if err != nil { - return nil, err - } - return CompressedToLayer(cl) -} - -// LayerByDiffID implements v1.Image -func (i *compressedImageExtender) LayerByDiffID(h v1.Hash) (v1.Layer, error) { - h, err := DiffIDToBlob(i, h) - if err != nil { - return nil, err - } - return i.LayerByDigest(h) -} - -// ConfigFile implements v1.Image -func (i *compressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { - return ConfigFile(i) -} - -// Manifest implements v1.Image -func (i *compressedImageExtender) Manifest() (*v1.Manifest, error) { - return Manifest(i) -} - -// Size implements v1.Image -func (i *compressedImageExtender) Size() (int64, error) { - return Size(i) -} - -// CompressedToImage fills in the missing methods from a CompressedImageCore so that it implements v1.Image -func CompressedToImage(cic CompressedImageCore) (v1.Image, error) { - return &compressedImageExtender{ - CompressedImageCore: cic, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go deleted file mode 100644 index 153dfe4d53..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package partial defines methods for building up a v1.Image from -// minimal subsets that are sufficient for defining a v1.Image. -package partial diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go deleted file mode 100644 index c65f45e0dc..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
- -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package partial - -import ( - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// ImageCore is the core set of properties without which we cannot build a v1.Image -type ImageCore interface { - // RawConfigFile returns the serialized bytes of this image's config file. - RawConfigFile() ([]byte, error) - - // MediaType of this image's manifest. - MediaType() (types.MediaType, error) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go deleted file mode 100644 index 10cfb2b2f6..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package partial - -import ( - "fmt" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/match" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// FindManifests, given a v1.ImageIndex, finds the manifests that fit the matcher. -func FindManifests(index v1.ImageIndex, matcher match.Matcher) ([]v1.Descriptor, error) { - // get the actual manifest list - indexManifest, err := index.IndexManifest() - if err != nil { - return nil, fmt.Errorf("unable to get raw index: %w", err) - } - manifests := []v1.Descriptor{} - // try to get the root of our image - for _, manifest := range indexManifest.Manifests { - if matcher(manifest) { - manifests = append(manifests, manifest) - } - } - return manifests, nil -} - -// FindImages, given a v1.ImageIndex, finds the images that fit the matcher. If a Descriptor -// matches the provided Matcher but the referenced item is not an Image, it is ignored. -// Only returns those that match the Matcher and are images. -func FindImages(index v1.ImageIndex, matcher match.Matcher) ([]v1.Image, error) { - matches := []v1.Image{} - manifests, err := FindManifests(index, matcher) - if err != nil { - return nil, err - } - for _, desc := range manifests { - // if it is not an image, ignore it - if !desc.MediaType.IsImage() { - continue - } - img, err := index.Image(desc.Digest) - if err != nil { - return nil, err - } - matches = append(matches, img) - } - return matches, nil -} - -// FindIndexes, given a v1.ImageIndex, finds the indexes that fit the matcher. If a Descriptor -// matches the provided Matcher but the referenced item is not an Index, it is ignored. -// Only returns those that match the Matcher and are indexes.
-func FindIndexes(index v1.ImageIndex, matcher match.Matcher) ([]v1.ImageIndex, error) { - matches := []v1.ImageIndex{} - manifests, err := FindManifests(index, matcher) - if err != nil { - return nil, err - } - for _, desc := range manifests { - // if it is not an index, ignore it - if !desc.MediaType.IsIndex() { - continue - } - idx, err := index.ImageIndex(desc.Digest) - if err != nil { - return nil, err - } - matches = append(matches, idx) - } - return matches, nil -} - -type withManifests interface { - Manifests() ([]Describable, error) -} - -type withLayer interface { - Layer(v1.Hash) (v1.Layer, error) -} - -type describable struct { - desc v1.Descriptor -} - -func (d describable) Digest() (v1.Hash, error) { - return d.desc.Digest, nil -} - -func (d describable) Size() (int64, error) { - return d.desc.Size, nil -} - -func (d describable) MediaType() (types.MediaType, error) { - return d.desc.MediaType, nil -} - -func (d describable) Descriptor() (*v1.Descriptor, error) { - return &d.desc, nil -} - -// Manifests is analogous to v1.Image.Layers in that it allows values in the -// returned list to be lazily evaluated, which enables an index to contain -// an image that contains a streaming layer. -// -// This should have been part of the v1.ImageIndex interface, but wasn't. -// It is instead usable through this extension interface. -func Manifests(idx v1.ImageIndex) ([]Describable, error) { - if wm, ok := idx.(withManifests); ok { - return wm.Manifests() - } - - return ComputeManifests(idx) -} - -// ComputeManifests provides a fallback implementation for Manifests. -func ComputeManifests(idx v1.ImageIndex) ([]Describable, error) { - m, err := idx.IndexManifest() - if err != nil { - return nil, err - } - manifests := []Describable{} - for _, desc := range m.Manifests { - switch { - case desc.MediaType.IsImage(): - img, err := idx.Image(desc.Digest) - if err != nil { - return nil, err - } - manifests = append(manifests, img) - case desc.MediaType.IsIndex(): - idx, err := idx.ImageIndex(desc.Digest) - if err != nil { - return nil, err - } - manifests = append(manifests, idx) - default: - if wl, ok := idx.(withLayer); ok { - layer, err := wl.Layer(desc.Digest) - if err != nil { - return nil, err - } - manifests = append(manifests, layer) - } else { - manifests = append(manifests, describable{desc}) - } - } - } - - return manifests, nil -}
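A brief usage sketch for the finders above; the platform chosen is an arbitrary example:

```go
package example

import (
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/match"
	"github.com/google/go-containerregistry/pkg/v1/partial"
)

// pickLinuxAmd64 returns the first linux/amd64 image in the index,
// skipping matching descriptors that are not images.
func pickLinuxAmd64(idx v1.ImageIndex) (v1.Image, error) {
	imgs, err := partial.FindImages(idx, match.Platforms(v1.Platform{OS: "linux", Architecture: "amd64"}))
	if err != nil {
		return nil, err
	}
	if len(imgs) == 0 {
		return nil, fmt.Errorf("no linux/amd64 image in index")
	}
	return imgs[0], nil
}
```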
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go deleted file mode 100644 index df20d3aa9e..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package partial - -import ( - "bytes" - "io" - "sync" - - "github.com/google/go-containerregistry/internal/gzip" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// UncompressedLayer represents the bare minimum interface a natively -// uncompressed layer must implement for us to produce a v1.Layer -type UncompressedLayer interface { - // DiffID returns the Hash of the uncompressed layer. - DiffID() (v1.Hash, error) - - // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. - Uncompressed() (io.ReadCloser, error) - - // Returns the mediaType for the uncompressed Layer - MediaType() (types.MediaType, error) -} - -// uncompressedLayerExtender implements v1.Layer using the uncompressed base properties. -type uncompressedLayerExtender struct { - UncompressedLayer - // Memoize size/hash so that the methods aren't twice as - // expensive as doing this manually. - hash v1.Hash - size int64 - hashSizeError error - once sync.Once -} - -// Compressed implements v1.Layer -func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) { - u, err := ule.Uncompressed() - if err != nil { - return nil, err - } - return gzip.ReadCloser(u), nil -} - -// Digest implements v1.Layer -func (ule *uncompressedLayerExtender) Digest() (v1.Hash, error) { - ule.calcSizeHash() - return ule.hash, ule.hashSizeError -} - -// Size implements v1.Layer -func (ule *uncompressedLayerExtender) Size() (int64, error) { - ule.calcSizeHash() - return ule.size, ule.hashSizeError -} - -func (ule *uncompressedLayerExtender) calcSizeHash() { - ule.once.Do(func() { - var r io.ReadCloser - r, ule.hashSizeError = ule.Compressed() - if ule.hashSizeError != nil { - return - } - defer r.Close() - ule.hash, ule.size, ule.hashSizeError = v1.SHA256(r) - }) -} - -// UncompressedToLayer fills in the missing methods from an UncompressedLayer so that it implements v1.Layer -func UncompressedToLayer(ul UncompressedLayer) (v1.Layer, error) { - return &uncompressedLayerExtender{UncompressedLayer: ul}, nil -} - -// UncompressedImageCore represents the bare minimum interface a natively -// uncompressed image must implement for us to produce a v1.Image -type UncompressedImageCore interface { - ImageCore - - // LayerByDiffID is a variation on the v1.Image method, which returns - // an UncompressedLayer instead. - LayerByDiffID(v1.Hash) (UncompressedLayer, error) -} - -// UncompressedToImage fills in the missing methods from an UncompressedImageCore so that it implements v1.Image. -func UncompressedToImage(uic UncompressedImageCore) (v1.Image, error) { - return &uncompressedImageExtender{ - UncompressedImageCore: uic, - }, nil -} - -// uncompressedImageExtender implements v1.Image by extending UncompressedImageCore with the -// appropriate methods computed from the minimal core.
-type uncompressedImageExtender struct { - UncompressedImageCore - - lock sync.Mutex - manifest *v1.Manifest -} - -// Assert that our extender type completes the v1.Image interface -var _ v1.Image = (*uncompressedImageExtender)(nil) - -// Digest implements v1.Image -func (i *uncompressedImageExtender) Digest() (v1.Hash, error) { - return Digest(i) -} - -// Manifest implements v1.Image -func (i *uncompressedImageExtender) Manifest() (*v1.Manifest, error) { - i.lock.Lock() - defer i.lock.Unlock() - if i.manifest != nil { - return i.manifest, nil - } - - b, err := i.RawConfigFile() - if err != nil { - return nil, err - } - - cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) - if err != nil { - return nil, err - } - - m := &v1.Manifest{ - SchemaVersion: 2, - MediaType: types.DockerManifestSchema2, - Config: v1.Descriptor{ - MediaType: types.DockerConfigJSON, - Size: cfgSize, - Digest: cfgHash, - }, - } - - ls, err := i.Layers() - if err != nil { - return nil, err - } - - m.Layers = make([]v1.Descriptor, len(ls)) - for i, l := range ls { - desc, err := Descriptor(l) - if err != nil { - return nil, err - } - - m.Layers[i] = *desc - } - - i.manifest = m - return i.manifest, nil -} - -// RawManifest implements v1.Image -func (i *uncompressedImageExtender) RawManifest() ([]byte, error) { - return RawManifest(i) -} - -// Size implements v1.Image -func (i *uncompressedImageExtender) Size() (int64, error) { - return Size(i) -} - -// ConfigName implements v1.Image -func (i *uncompressedImageExtender) ConfigName() (v1.Hash, error) { - return ConfigName(i) -} - -// ConfigFile implements v1.Image -func (i *uncompressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { - return ConfigFile(i) -} - -// Layers implements v1.Image -func (i *uncompressedImageExtender) Layers() ([]v1.Layer, error) { - diffIDs, err := DiffIDs(i) - if err != nil { - return nil, err - } - ls := make([]v1.Layer, 0, len(diffIDs)) - for _, h := range diffIDs { - l, err := i.LayerByDiffID(h) - if err != nil { - return nil, err - } - ls = append(ls, l) - } - return ls, nil -} - -// LayerByDiffID implements v1.Image -func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, error) { - ul, err := i.UncompressedImageCore.LayerByDiffID(diffID) - if err != nil { - return nil, err - } - return UncompressedToLayer(ul) -} - -// LayerByDigest implements v1.Image -func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { - diffID, err := BlobToDiffID(i, h) - if err != nil { - return nil, err - } - return i.LayerByDiffID(diffID) -}
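To make the core-to-image direction concrete, a minimal in-memory sketch; it assumes the raw config parses as a v1.ConfigFile and the map keys match its diff IDs, and the type and names are illustrative:

```go
package example

import (
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/partial"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

// memCore is an illustrative in-memory UncompressedImageCore: raw config
// bytes plus a map from diffID to layer.
type memCore struct {
	rawConfig []byte
	layers    map[v1.Hash]partial.UncompressedLayer
}

func (m *memCore) RawConfigFile() ([]byte, error) { return m.rawConfig, nil }

func (m *memCore) MediaType() (types.MediaType, error) {
	return types.DockerManifestSchema2, nil
}

func (m *memCore) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) {
	l, ok := m.layers[h]
	if !ok {
		return nil, fmt.Errorf("unknown diffID %v", h)
	}
	return l, nil
}

// imageFromCore turns the three-method core into a full v1.Image.
func imageFromCore(m *memCore) (v1.Image, error) {
	return partial.UncompressedToImage(m)
}
```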
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go deleted file mode 100644 index c8b22b3f95..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package partial - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// WithRawConfigFile defines the subset of v1.Image used by these helper methods -type WithRawConfigFile interface { - // RawConfigFile returns the serialized bytes of this image's config file. - RawConfigFile() ([]byte, error) -} - -// ConfigFile is a helper for implementing v1.Image -func ConfigFile(i WithRawConfigFile) (*v1.ConfigFile, error) { - b, err := i.RawConfigFile() - if err != nil { - return nil, err - } - return v1.ParseConfigFile(bytes.NewReader(b)) -} - -// ConfigName is a helper for implementing v1.Image -func ConfigName(i WithRawConfigFile) (v1.Hash, error) { - b, err := i.RawConfigFile() - if err != nil { - return v1.Hash{}, err - } - h, _, err := v1.SHA256(bytes.NewReader(b)) - return h, err -} - -type configLayer struct { - hash v1.Hash - content []byte -} - -// Digest implements v1.Layer -func (cl *configLayer) Digest() (v1.Hash, error) { - return cl.hash, nil -} - -// DiffID implements v1.Layer -func (cl *configLayer) DiffID() (v1.Hash, error) { - return cl.hash, nil -} - -// Uncompressed implements v1.Layer -func (cl *configLayer) Uncompressed() (io.ReadCloser, error) { - return io.NopCloser(bytes.NewBuffer(cl.content)), nil -} - -// Compressed implements v1.Layer -func (cl *configLayer) Compressed() (io.ReadCloser, error) { - return io.NopCloser(bytes.NewBuffer(cl.content)), nil -} - -// Size implements v1.Layer -func (cl *configLayer) Size() (int64, error) { - return int64(len(cl.content)), nil -} - -func (cl *configLayer) MediaType() (types.MediaType, error) { - // Defaulting this to OCIConfigJSON as it should remain - // backwards compatible with DockerConfigJSON - return types.OCIConfigJSON, nil -} - -var _ v1.Layer = (*configLayer)(nil) - -// withConfigLayer allows partial image implementations to provide a layer -// for their config file. -type withConfigLayer interface { - ConfigLayer() (v1.Layer, error) -} - -// ConfigLayer implements v1.Layer from the raw config bytes. -// This is so that clients (e.g. remote) can access the config as a blob. -// -// Images that want to return a specific layer implementation can implement -// withConfigLayer. -func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) { - if wcl, ok := unwrap(i).(withConfigLayer); ok { - return wcl.ConfigLayer() - } - - h, err := ConfigName(i) - if err != nil { - return nil, err - } - rcfg, err := i.RawConfigFile() - if err != nil { - return nil, err - } - return &configLayer{ - hash: h, - content: rcfg, - }, nil -} - -// WithConfigFile defines the subset of v1.Image used by these helper methods -type WithConfigFile interface { - // ConfigFile returns this image's config file. - ConfigFile() (*v1.ConfigFile, error) -} - -// DiffIDs is a helper for implementing v1.Image -func DiffIDs(i WithConfigFile) ([]v1.Hash, error) { - cfg, err := i.ConfigFile() - if err != nil { - return nil, err - } - return cfg.RootFS.DiffIDs, nil -} - -// RawConfigFile is a helper for implementing v1.Image -func RawConfigFile(i WithConfigFile) ([]byte, error) { - cfg, err := i.ConfigFile() - if err != nil { - return nil, err - } - return json.Marshal(cfg) -} - -// WithRawManifest defines the subset of v1.Image used by these helper methods -type WithRawManifest interface { - // RawManifest returns the serialized bytes of this image's manifest.
- RawManifest() ([]byte, error) -} - -// Digest is a helper for implementing v1.Image -func Digest(i WithRawManifest) (v1.Hash, error) { - mb, err := i.RawManifest() - if err != nil { - return v1.Hash{}, err - } - digest, _, err := v1.SHA256(bytes.NewReader(mb)) - return digest, err -} - -// Manifest is a helper for implementing v1.Image -func Manifest(i WithRawManifest) (*v1.Manifest, error) { - b, err := i.RawManifest() - if err != nil { - return nil, err - } - return v1.ParseManifest(bytes.NewReader(b)) -} - -// WithManifest defines the subset of v1.Image used by these helper methods -type WithManifest interface { - // Manifest returns this image's Manifest object. - Manifest() (*v1.Manifest, error) -} - -// RawManifest is a helper for implementing v1.Image -func RawManifest(i WithManifest) ([]byte, error) { - m, err := i.Manifest() - if err != nil { - return nil, err - } - return json.Marshal(m) -} - -// Size is a helper for implementing v1.Image -func Size(i WithRawManifest) (int64, error) { - b, err := i.RawManifest() - if err != nil { - return -1, err - } - return int64(len(b)), nil -} - -// FSLayers is a helper for implementing v1.Image -func FSLayers(i WithManifest) ([]v1.Hash, error) { - m, err := i.Manifest() - if err != nil { - return nil, err - } - fsl := make([]v1.Hash, len(m.Layers)) - for i, l := range m.Layers { - fsl[i] = l.Digest - } - return fsl, nil -} - -// BlobSize is a helper for implementing v1.Image -func BlobSize(i WithManifest, h v1.Hash) (int64, error) { - d, err := BlobDescriptor(i, h) - if err != nil { - return -1, err - } - return d.Size, nil -} - -// BlobDescriptor is a helper for implementing v1.Image -func BlobDescriptor(i WithManifest, h v1.Hash) (*v1.Descriptor, error) { - m, err := i.Manifest() - if err != nil { - return nil, err - } - - if m.Config.Digest == h { - return &m.Config, nil - } - - for _, l := range m.Layers { - if l.Digest == h { - return &l, nil - } - } - return nil, fmt.Errorf("blob %v not found", h) -} - -// WithManifestAndConfigFile defines the subset of v1.Image used by these helper methods -type WithManifestAndConfigFile interface { - WithConfigFile - - // Manifest returns this image's Manifest object. - Manifest() (*v1.Manifest, error) -} - -// BlobToDiffID is a helper for mapping between compressed -// and uncompressed blob hashes. -func BlobToDiffID(i WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { - blobs, err := FSLayers(i) - if err != nil { - return v1.Hash{}, err - } - diffIDs, err := DiffIDs(i) - if err != nil { - return v1.Hash{}, err - } - if len(blobs) != len(diffIDs) { - return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) - } - for i, blob := range blobs { - if blob == h { - return diffIDs[i], nil - } - } - return v1.Hash{}, fmt.Errorf("unknown blob %v", h) -} - -// DiffIDToBlob is a helper for mapping between uncompressed -// and compressed blob hashes. -func DiffIDToBlob(wm WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { - blobs, err := FSLayers(wm) - if err != nil { - return v1.Hash{}, err - } - diffIDs, err := DiffIDs(wm) - if err != nil { - return v1.Hash{}, err - } - if len(blobs) != len(diffIDs) { - return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) - } - for i, diffID := range diffIDs { - if diffID == h { - return blobs[i], nil - } - } - return v1.Hash{}, fmt.Errorf("unknown diffID %v", h) -} - -// WithDiffID defines the subset of v1.Layer for exposing the DiffID method. 
-type WithDiffID interface { - DiffID() (v1.Hash, error) -} - -// withDescriptor allows partial layer implementations to provide a layer -// descriptor to the partial image manifest builder. This allows partial -// uncompressed layers to provide foreign layer metadata like URLs to the -// uncompressed image manifest. -type withDescriptor interface { - Descriptor() (*v1.Descriptor, error) -} - -// Describable represents something for which we can produce a v1.Descriptor. -type Describable interface { - Digest() (v1.Hash, error) - MediaType() (types.MediaType, error) - Size() (int64, error) -} - -// Descriptor returns a v1.Descriptor given a Describable. It also encodes -// some logic for unwrapping things that have been wrapped by -// CompressedToLayer, UncompressedToLayer, CompressedToImage, or -// UncompressedToImage. -func Descriptor(d Describable) (*v1.Descriptor, error) { - // If Describable implements Descriptor itself, return that. - if wd, ok := unwrap(d).(withDescriptor); ok { - return wd.Descriptor() - } - - // If all else fails, compute the descriptor from the individual methods. - var ( - desc v1.Descriptor - err error - ) - - if desc.Size, err = d.Size(); err != nil { - return nil, err - } - if desc.Digest, err = d.Digest(); err != nil { - return nil, err - } - if desc.MediaType, err = d.MediaType(); err != nil { - return nil, err - } - if wat, ok := d.(withArtifactType); ok { - if desc.ArtifactType, err = wat.ArtifactType(); err != nil { - return nil, err - } - } else { - if wrm, ok := d.(WithRawManifest); ok && desc.MediaType.IsImage() { - mf, _ := Manifest(wrm) - // Failing to parse as a manifest should just be ignored. - // The manifest might not be valid, and that's okay. - if mf != nil && !mf.Config.MediaType.IsConfig() { - desc.ArtifactType = string(mf.Config.MediaType) - } - } - } - - return &desc, nil -} - -type withArtifactType interface { - ArtifactType() (string, error) -} - -type withUncompressedSize interface { - UncompressedSize() (int64, error) -} - -// UncompressedSize returns the size of the Uncompressed layer. If the -// underlying implementation doesn't implement UncompressedSize directly, -// this will compute the uncompressedSize by reading everything returned -// by Compressed(). This is potentially expensive and may consume the contents -// for streaming layers. -func UncompressedSize(l v1.Layer) (int64, error) { - // If the layer implements UncompressedSize itself, return that. - if wus, ok := unwrap(l).(withUncompressedSize); ok { - return wus.UncompressedSize() - } - - // The layer doesn't implement UncompressedSize, we need to compute it. - rc, err := l.Uncompressed() - if err != nil { - return -1, err - } - defer rc.Close() - - return io.Copy(io.Discard, rc) -} - -type withExists interface { - Exists() (bool, error) -} - -// Exists checks to see if a layer exists. This is a hack to work around the -// mistakes of the partial package. Don't use this. -func Exists(l v1.Layer) (bool, error) { - // If the layer implements Exists itself, return that. - if we, ok := unwrap(l).(withExists); ok { - return we.Exists() - } - - // The layer doesn't implement Exists, so we hope that calling Compressed() - // is enough to trigger an error if the layer does not exist. - rc, err := l.Compressed() - if err != nil { - return false, err - } - defer rc.Close() - - // We may want to try actually reading a single byte, but if we need to do - // that, we should just fix this hack. 
- return true, nil -} - -// Recursively unwrap our wrappers so that we can check for the original implementation. -// We might want to expose this? -func unwrap(i any) any { - if ule, ok := i.(*uncompressedLayerExtender); ok { - return unwrap(ule.UncompressedLayer) - } - if cle, ok := i.(*compressedLayerExtender); ok { - return unwrap(cle.CompressedLayer) - } - if uie, ok := i.(*uncompressedImageExtender); ok { - return unwrap(uie.UncompressedImageCore) - } - if cie, ok := i.(*compressedImageExtender); ok { - return unwrap(cie.CompressedImageCore) - } - return i -} - -// ArtifactType returns the artifact type for the given manifest. -// -// If the manifest reports its own artifact type, that's returned, otherwise -// the manifest is parsed and, if successful, its config.mediaType is returned. -func ArtifactType(w WithManifest) (string, error) { - if wat, ok := w.(withArtifactType); ok { - return wat.ArtifactType() - } - mf, _ := w.Manifest() - // Failing to parse as a manifest should just be ignored. - // The manifest might not be valid, and that's okay. - if mf != nil && !mf.Config.MediaType.IsConfig() { - return string(mf.Config.MediaType), nil - } - return "", nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go deleted file mode 100644 index 59ca402698..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "fmt" - "sort" - "strings" -) - -// Platform represents the target os/arch for an image. -type Platform struct { - Architecture string `json:"architecture"` - OS string `json:"os"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - Variant string `json:"variant,omitempty"` - Features []string `json:"features,omitempty"` -} - -func (p Platform) String() string { - if p.OS == "" { - return "" - } - var b strings.Builder - b.WriteString(p.OS) - if p.Architecture != "" { - b.WriteString("/") - b.WriteString(p.Architecture) - } - if p.Variant != "" { - b.WriteString("/") - b.WriteString(p.Variant) - } - if p.OSVersion != "" { - b.WriteString(":") - b.WriteString(p.OSVersion) - } - return b.String() -} - -// ParsePlatform parses a string representing a Platform, if possible. -func ParsePlatform(s string) (*Platform, error) { - var p Platform - parts := strings.Split(strings.TrimSpace(s), ":") - if len(parts) == 2 { - p.OSVersion = parts[1] - } - parts = strings.Split(parts[0], "/") - if len(parts) > 0 { - p.OS = parts[0] - } - if len(parts) > 1 { - p.Architecture = parts[1] - } - if len(parts) > 2 { - p.Variant = parts[2] - } - if len(parts) > 3 { - return nil, fmt.Errorf("too many slashes in platform spec: %s", s) - } - return &p, nil -} - -// Equals returns true if the given platform is semantically equivalent to this one. 
-// The order of Features and OSFeatures is not important. -func (p Platform) Equals(o Platform) bool { - return p.OS == o.OS && - p.Architecture == o.Architecture && - p.Variant == o.Variant && - p.OSVersion == o.OSVersion && - stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) && - stringSliceEqualIgnoreOrder(p.Features, o.Features) -} - -// Satisfies returns true if this Platform "satisfies" the given spec Platform. -// -// Note that this is different from Equals and that Satisfies is not reflexive. -// -// The given spec represents "requirements" such that any missing values in the -// spec are not compared. -// -// For OSFeatures and Features, Satisfies will return true if this Platform's -// fields contain a superset of the values in the spec's fields (order ignored). -func (p Platform) Satisfies(spec Platform) bool { - return satisfies(spec.OS, p.OS) && - satisfies(spec.Architecture, p.Architecture) && - satisfies(spec.Variant, p.Variant) && - satisfies(spec.OSVersion, p.OSVersion) && - satisfiesList(spec.OSFeatures, p.OSFeatures) && - satisfiesList(spec.Features, p.Features) -} - -func satisfies(want, have string) bool { - return want == "" || want == have -} - -func satisfiesList(want, have []string) bool { - if len(want) == 0 { - return true - } - - set := map[string]struct{}{} - for _, h := range have { - set[h] = struct{}{} - } - - for _, w := range want { - if _, ok := set[w]; !ok { - return false - } - } - - return true -} - -// stringSliceEqual compares two string slices and returns whether their contents are identical. -func stringSliceEqual(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i, elm := range a { - if elm != b[i] { - return false - } - } - return true -} - -// stringSliceEqualIgnoreOrder compares two string slices and returns whether their contents are identical, ignoring order. -func stringSliceEqualIgnoreOrder(a, b []string) bool { - if a != nil && b != nil { - sort.Strings(a) - sort.Strings(b) - } - return stringSliceEqual(a, b) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go b/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go deleted file mode 100644 index 844f04d937..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -// Update represents an update of transfer progress. Some functions -// in this module can take a channel to which updates will be sent while a -// transfer is in progress. 
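A short sketch of the `Platform` semantics above, under the same upstream-module assumption: `Satisfies` skips fields that are empty in the spec, while `Equals` compares them all.

```go
package main

import (
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	// A spec with empty fields acts as a set of requirements.
	spec, err := v1.ParsePlatform("linux/arm64")
	if err != nil {
		panic(err)
	}
	have, err := v1.ParsePlatform("linux/arm64/v8")
	if err != nil {
		panic(err)
	}

	fmt.Println(have.Satisfies(*spec)) // true: the spec's empty Variant is not compared
	fmt.Println(spec.Satisfies(*have)) // false: Satisfies is not reflexive
	fmt.Println(have.Equals(*spec))    // false: Variant differs
}
```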
-// +k8s:deepcopy-gen=false -type Update struct { - Total int64 - Complete int64 - Error error -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/random/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/random/doc.go deleted file mode 100644 index d3712767d2..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/random/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package random provides a facility for synthesizing pseudo-random images. -package random diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/random/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/random/image.go deleted file mode 100644 index 6121aa715c..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/random/image.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package random - -import ( - "archive/tar" - "bytes" - "crypto" - "encoding/hex" - "fmt" - "io" - "math/rand" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/empty" - "github.com/google/go-containerregistry/pkg/v1/mutate" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// uncompressedLayer implements partial.UncompressedLayer from raw bytes. -type uncompressedLayer struct { - diffID v1.Hash - mediaType types.MediaType - content []byte -} - -// DiffID implements partial.UncompressedLayer -func (ul *uncompressedLayer) DiffID() (v1.Hash, error) { - return ul.diffID, nil -} - -// Uncompressed implements partial.UncompressedLayer -func (ul *uncompressedLayer) Uncompressed() (io.ReadCloser, error) { - return io.NopCloser(bytes.NewBuffer(ul.content)), nil -} - -// MediaType returns the media type of the layer -func (ul *uncompressedLayer) MediaType() (types.MediaType, error) { - return ul.mediaType, nil -} - -var _ partial.UncompressedLayer = (*uncompressedLayer)(nil) - -// Image returns a pseudo-randomly generated Image. -func Image(byteSize, layers int64, options ...Option) (v1.Image, error) { - adds := make([]mutate.Addendum, 0, 5) - for i := int64(0); i < layers; i++ { - layer, err := Layer(byteSize, types.DockerLayer, options...) 
- if err != nil { - return nil, err - } - adds = append(adds, mutate.Addendum{ - Layer: layer, - History: v1.History{ - Author: "random.Image", - Comment: fmt.Sprintf("this is a random history %d of %d", i, layers), - CreatedBy: "random", - }, - }) - } - - return mutate.Append(empty.Image, adds...) -} - -// Layer returns a layer with pseudo-randomly generated content. -func Layer(byteSize int64, mt types.MediaType, options ...Option) (v1.Layer, error) { - o := getOptions(options) - rng := rand.New(o.source) //nolint:gosec - - fileName := fmt.Sprintf("random_file_%d.txt", rng.Int()) - - // Hash the contents as we write it out to the buffer. - var b bytes.Buffer - hasher := crypto.SHA256.New() - mw := io.MultiWriter(&b, hasher) - - // Write a single file with a random name and random contents. - tw := tar.NewWriter(mw) - if err := tw.WriteHeader(&tar.Header{ - Name: fileName, - Size: byteSize, - Typeflag: tar.TypeReg, - }); err != nil { - return nil, err - } - if _, err := io.CopyN(tw, rng, byteSize); err != nil { - return nil, err - } - if err := tw.Close(); err != nil { - return nil, err - } - - h := v1.Hash{ - Algorithm: "sha256", - Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), - } - - return partial.UncompressedToLayer(&uncompressedLayer{ - diffID: h, - mediaType: mt, - content: b.Bytes(), - }) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/random/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/random/index.go deleted file mode 100644 index 4368bddff3..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/random/index.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package random - -import ( - "bytes" - "encoding/json" - "fmt" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -type randomIndex struct { - images map[v1.Hash]v1.Image - manifest *v1.IndexManifest -} - -// Index returns a pseudo-randomly generated ImageIndex with count images, each -// having the given number of layers of size byteSize. -func Index(byteSize, layers, count int64, options ...Option) (v1.ImageIndex, error) { - manifest := v1.IndexManifest{ - SchemaVersion: 2, - MediaType: types.OCIImageIndex, - Manifests: []v1.Descriptor{}, - } - - images := make(map[v1.Hash]v1.Image) - for i := int64(0); i < count; i++ { - img, err := Image(byteSize, layers, options...) 
- if err != nil { - return nil, err - } - - rawManifest, err := img.RawManifest() - if err != nil { - return nil, err - } - digest, size, err := v1.SHA256(bytes.NewReader(rawManifest)) - if err != nil { - return nil, err - } - mediaType, err := img.MediaType() - if err != nil { - return nil, err - } - - manifest.Manifests = append(manifest.Manifests, v1.Descriptor{ - Digest: digest, - Size: size, - MediaType: mediaType, - }) - - images[digest] = img - } - - return &randomIndex{ - images: images, - manifest: &manifest, - }, nil -} - -func (i *randomIndex) MediaType() (types.MediaType, error) { - return i.manifest.MediaType, nil -} - -func (i *randomIndex) Digest() (v1.Hash, error) { - return partial.Digest(i) -} - -func (i *randomIndex) Size() (int64, error) { - return partial.Size(i) -} - -func (i *randomIndex) IndexManifest() (*v1.IndexManifest, error) { - return i.manifest, nil -} - -func (i *randomIndex) RawManifest() ([]byte, error) { - m, err := i.IndexManifest() - if err != nil { - return nil, err - } - return json.Marshal(m) -} - -func (i *randomIndex) Image(h v1.Hash) (v1.Image, error) { - if img, ok := i.images[h]; ok { - return img, nil - } - - return nil, fmt.Errorf("image not found: %v", h) -} - -func (i *randomIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { - // This is a single level index (for now?). - return nil, fmt.Errorf("image not found: %v", h) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/random/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/random/options.go deleted file mode 100644 index af1d2f9695..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/random/options.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
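A hedged sketch of the `random` package's API as vendored here: `Image` and `Index` synthesize throwaway artifacts for tests, and `WithSource` (from the options.go deleted just below) makes them reproducible.

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/go-containerregistry/pkg/v1/random"
)

func main() {
	// Three layers of 1 KiB of pseudo-random content each; a fixed
	// source makes the result reproducible across runs.
	img, err := random.Image(1024, 3, random.WithSource(rand.NewSource(42)))
	if err != nil {
		panic(err)
	}
	d, err := img.Digest()
	if err != nil {
		panic(err)
	}
	fmt.Println("image:", d)

	// An index containing two single-layer images.
	idx, err := random.Index(512, 1, 2)
	if err != nil {
		panic(err)
	}
	m, err := idx.IndexManifest()
	if err != nil {
		panic(err)
	}
	fmt.Println("index entries:", len(m.Manifests))
}
```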
- -package random - -import "math/rand" - -// Option is an optional parameter to the random functions -type Option func(opts *options) - -type options struct { - source rand.Source - - // TODO opens the door to add this in the future - // algorithm digest.Algorithm -} - -func getOptions(opts []Option) *options { - // get a random seed - - // TODO in go 1.20 this is fine (it will be random) - seed := rand.Int63() //nolint:gosec - /* - // in prior go versions this needs to come from crypto/rand - var b [8]byte - _, err := crypto_rand.Read(b[:]) - if err != nil { - panic("cryptographically secure random number generator is not working") - } - seed := int64(binary.LittleEndian.Uint64(b[:])) - */ - - // defaults - o := &options{ - source: rand.NewSource(seed), - } - - for _, opt := range opts { - opt(o) - } - return o -} - -// WithSource sets the random number generator source -func WithSource(source rand.Source) Option { - return func(opts *options) { - opts.source = source - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md deleted file mode 100644 index c1e81b310b..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# `remote` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) - -The `remote` package implements a client for accessing a registry, -per the [OCI distribution spec](https://github.com/opencontainers/distribution-spec/blob/master/spec.md). - -It leans heavily on the lower level [`transport`](/pkg/v1/remote/transport) package, which handles the -authentication handshake and structured errors. - -## Usage - -```go -package main - -import ( - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" -) - -func main() { - ref, err := name.ParseReference("gcr.io/google-containers/pause") - if err != nil { - panic(err) - } - - img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) - if err != nil { - panic(err) - } - - // do stuff with img -} -``` - -## Structure - -
- -## Background - -There are a lot of confusingly similar terms that come up when talking about images in registries. - -### Anatomy of an image - -In general... - -* A tag refers to an image manifest. -* An image manifest references a config file and an ordered list of _compressed_ layers by sha256 digest. -* A config file references an ordered list of _uncompressed_ layers by sha256 digest and contains runtime configuration. -* The sha256 digest of the config file is the [image id](https://github.com/opencontainers/image-spec/blob/master/config.md#imageid) for the image. - -For example, an image with two layers would look something like this: - -![image anatomy](/images/image-anatomy.dot.svg) - -### Anatomy of an index - -In the normal case, an [index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) is used to represent a multi-platform image. -This was the original use case for a [manifest -list](https://docs.docker.com/registry/spec/manifest-v2-2/#manifest-list). - -![image index anatomy](/images/index-anatomy.dot.svg) - -It is possible for an index to reference another index, per the OCI -[image-spec](https://github.com/opencontainers/image-spec/blob/master/media-types.md#compatibility-matrix). -In theory, both an image and image index can reference arbitrary things via -[descriptors](https://github.com/opencontainers/image-spec/blob/master/descriptor.md), -e.g. see the [image layout -example](https://github.com/opencontainers/image-spec/blob/master/image-layout.md#index-example), -which references an application/xml file from an image index. - -That could look something like this: - -![strange image index anatomy](/images/index-anatomy-strange.dot.svg) - -Using a recursive index like this might not be possible with all registries, -but this flexibility allows for some interesting applications, e.g. the -[OCI Artifacts](https://github.com/opencontainers/artifacts) effort. - -### Anatomy of an image upload - -The structure of an image requires a delicate ordering when uploading an image to a registry. -Below is a (slightly simplified) figure that describes how an image is prepared for upload -to a registry and how the data flows between various artifacts: - -![upload](/images/upload.dot.svg) - -Note that: - -* A config file references the uncompressed layer contents by sha256. -* A manifest references the compressed layer contents by sha256 and the size of the layer. -* A manifest references the config file contents by sha256 and the size of the file. - -It follows that during an upload, we need to upload layers before the config file, -and we need to upload the config file before the manifest. - -Sometimes, we know all of this information ahead of time (e.g. when copying from remote.Image), -so the ordering is less important. - -In other cases, e.g. when using a [`stream.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream#Layer), -we can't compute anything until we have already uploaded the layer, so we need to be careful about ordering. - -## Caveats - -### schema 1 - -This package does not support schema 1 images (see [`#377`](https://github.com/google/go-containerregistry/issues/377)); -however, it's possible to do _something_ useful with them via [`remote.Get`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote#Get), -which doesn't try to interpret what is returned by the registry. 
- -[`crane.Copy`](https://godoc.org/github.com/google/go-containerregistry/pkg/crane#Copy) takes advantage of this to implement support for copying schema 1 images, -see [here](https://github.com/google/go-containerregistry/blob/main/pkg/internal/legacy/copy.go). diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go deleted file mode 100644 index a0281b9fd2..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -type Catalogs struct { - Repos []string `json:"repositories"` - Next string `json:"next,omitempty"` -} - -// CatalogPage calls /_catalog, returning the list of repositories on the registry. -func CatalogPage(target name.Registry, last string, n int, options ...Option) ([]string, error) { - o, err := makeOptions(options...) - if err != nil { - return nil, err - } - - f, err := newPuller(o).fetcher(o.context, target) - if err != nil { - return nil, err - } - - uri := url.URL{ - Scheme: target.Scheme(), - Host: target.RegistryStr(), - Path: "/v2/_catalog", - RawQuery: fmt.Sprintf("last=%s&n=%d", url.QueryEscape(last), n), - } - - req, err := http.NewRequest(http.MethodGet, uri.String(), nil) - if err != nil { - return nil, err - } - resp, err := f.client.Do(req.WithContext(o.context)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - var parsed Catalogs - if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { - return nil, err - } - - return parsed.Repos, nil -} - -// Catalog calls /_catalog, returning the list of repositories on the registry. -func Catalog(ctx context.Context, target name.Registry, options ...Option) ([]string, error) { - o, err := makeOptions(options...) - if err != nil { - return nil, err - } - - // WithContext overrides the ctx passed directly. 
- if o.context != context.Background() { - ctx = o.context - } - - return newPuller(o).catalog(ctx, target, o.pageSize) -} - -func (f *fetcher) catalogPage(ctx context.Context, reg name.Registry, next string, pageSize int) (*Catalogs, error) { - if next == "" { - uri := &url.URL{ - Scheme: reg.Scheme(), - Host: reg.RegistryStr(), - Path: "/v2/_catalog", - } - if pageSize > 0 { - uri.RawQuery = fmt.Sprintf("n=%d", pageSize) - } - next = uri.String() - } - - req, err := http.NewRequestWithContext(ctx, "GET", next, nil) - if err != nil { - return nil, err - } - - resp, err := f.client.Do(req) - if err != nil { - return nil, err - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - parsed := Catalogs{} - if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { - return nil, err - } - - if err := resp.Body.Close(); err != nil { - return nil, err - } - - uri, err := getNextPageURL(resp) - if err != nil { - return nil, err - } - - if uri != nil { - parsed.Next = uri.String() - } - - return &parsed, nil -} - -type Catalogger struct { - f *fetcher - reg name.Registry - pageSize int - - page *Catalogs - err error - - needMore bool -} - -func (l *Catalogger) Next(ctx context.Context) (*Catalogs, error) { - if l.needMore { - l.page, l.err = l.f.catalogPage(ctx, l.reg, l.page.Next, l.pageSize) - } else { - l.needMore = true - } - return l.page, l.err -} - -func (l *Catalogger) HasNext() bool { - return l.page != nil && (!l.needMore || l.page.Next != "") -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go deleted file mode 100644 index b4395c2391..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "context" - "fmt" - "net/http" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -// CheckPushPermission returns an error if the given keychain cannot authorize -// a push operation to the given ref. -// -// This can be useful to check whether the caller has permission to push an -// image before doing work to construct the image. -// -// TODO(#412): Remove the need for this method. 
-func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTripper) error { - auth, err := kc.Resolve(ref.Context().Registry) - if err != nil { - return fmt.Errorf("resolving authorization for %v failed: %w", ref.Context().Registry, err) - } - - scopes := []string{ref.Scope(transport.PushScope)} - tr, err := transport.NewWithContext(context.TODO(), ref.Context().Registry, auth, t, scopes) - if err != nil { - return fmt.Errorf("creating push check transport for %v failed: %w", ref.Context().Registry, err) - } - // TODO(jasonhall): Against GCR, just doing the token handshake is - // enough, but this doesn't extend to Dockerhub - // (https://github.com/docker/hub-feedback/issues/1771), so we actually - // need to initiate an upload to tell whether the credentials can - // authorize a push. Figure out how to return early here when we can, - // to avoid a roundtrip for spec-compliant registries. - w := writer{ - repo: ref.Context(), - client: &http.Client{Transport: tr}, - } - loc, _, err := w.initiateUpload(context.Background(), "", "", "") - if loc != "" { - // Since we're only initiating the upload to check whether we - // can, we should attempt to cancel it, in case initiating - // reserves some resources on the server. We shouldn't wait for - // cancelling to complete, and we don't care if it fails. - go w.cancelUpload(loc) - } - return err -} - -func (w *writer) cancelUpload(loc string) { - req, err := http.NewRequest(http.MethodDelete, loc, nil) - if err != nil { - return - } - _, _ = w.client.Do(req) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go deleted file mode 100644 index 36e1d0816e..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "github.com/google/go-containerregistry/pkg/name" -) - -// Delete removes the specified image reference from the remote registry. -func Delete(ref name.Reference, options ...Option) error { - o, err := makeOptions(options...) - if err != nil { - return err - } - return newPusher(o).Delete(o.context, ref) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go deleted file mode 100644 index fafe910e95..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
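To show how the deleted `catalog.go`, `check.go`, and `delete.go` entry points fit together, a sketch against a hypothetical registry; `localhost:5000` and the `test/random` repository are assumptions, and `remote.Write` performs the layers-then-config-then-manifest ordering the README above describes.

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/random"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Assumes a registry listening on localhost:5000 (e.g. a local registry:2).
	reg, err := name.NewRegistry("localhost:5000")
	if err != nil {
		panic(err)
	}
	repos, err := remote.Catalog(context.Background(), reg)
	if err != nil {
		panic(err)
	}
	fmt.Println("repositories:", repos)

	ref, err := name.ParseReference("localhost:5000/test/random:latest")
	if err != nil {
		panic(err)
	}

	// Verify push access before doing any expensive work.
	if err := remote.CheckPushPermission(ref, authn.DefaultKeychain, http.DefaultTransport); err != nil {
		panic(err)
	}

	// remote.Write uploads layers, then the config blob, then the manifest.
	img, err := random.Image(1024, 2)
	if err != nil {
		panic(err)
	}
	if err := remote.Write(ref, img); err != nil {
		panic(err)
	}

	// Clean up; many registries only accept deletion by digest.
	d, err := img.Digest()
	if err != nil {
		panic(err)
	}
	if err := remote.Delete(ref.Context().Digest(d.String())); err != nil {
		fmt.Println("delete failed:", err)
	}
}
```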
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "context" - "errors" - "fmt" - - "github.com/google/go-containerregistry/pkg/logs" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -var allManifestMediaTypes = append(append([]types.MediaType{ - types.DockerManifestSchema1, - types.DockerManifestSchema1Signed, -}, acceptableImageMediaTypes...), acceptableIndexMediaTypes...) - -// ErrSchema1 indicates that we received a schema1 manifest from the registry. -// This library doesn't have plans to support this legacy image format: -// https://github.com/google/go-containerregistry/issues/377 -var ErrSchema1 = errors.New("see https://github.com/google/go-containerregistry/issues/377") - -// newErrSchema1 returns an ErrSchema1 with the unexpected MediaType. -func newErrSchema1(schema types.MediaType) error { - return fmt.Errorf("unsupported MediaType: %q, %w", schema, ErrSchema1) -} - -// Descriptor provides access to metadata about a remote artifact and accessors -// for efficiently converting it into a v1.Image or v1.ImageIndex. -type Descriptor struct { - fetcher fetcher - v1.Descriptor - - ref name.Reference - Manifest []byte - ctx context.Context - - // So we can share this implementation with Image. - platform v1.Platform -} - -func (d *Descriptor) toDesc() v1.Descriptor { - return d.Descriptor -} - -// RawManifest exists to satisfy the Taggable interface. -func (d *Descriptor) RawManifest() ([]byte, error) { - return d.Manifest, nil -} - -// Get returns a remote.Descriptor for the given reference. The response from -// the registry is left un-interpreted, for the most part. This is useful for -// querying what kind of artifact a reference represents. -// -// See Head if you don't need the response body. -func Get(ref name.Reference, options ...Option) (*Descriptor, error) { - return get(ref, allManifestMediaTypes, options...) -} - -// Head returns a v1.Descriptor for the given reference by issuing a HEAD -// request. -// -// Note that the server response will not have a body, so any errors encountered -// should be retried with Get to get more details. -func Head(ref name.Reference, options ...Option) (*v1.Descriptor, error) { - o, err := makeOptions(options...) - if err != nil { - return nil, err - } - - return newPuller(o).Head(o.context, ref) -} - -// Handle options and fetch the manifest with the acceptable MediaTypes in the -// Accept header. -func get(ref name.Reference, acceptable []types.MediaType, options ...Option) (*Descriptor, error) { - o, err := makeOptions(options...) - if err != nil { - return nil, err - } - return newPuller(o).get(o.context, ref, acceptable, o.platform) -} - -// Image converts the Descriptor into a v1.Image. -// -// If the fetched artifact is already an image, it will just return it. -// -// If the fetched artifact is an index, it will attempt to resolve the index to -// a child image with the appropriate platform. 
-// -// See WithPlatform to set the desired platform. -func (d *Descriptor) Image() (v1.Image, error) { - switch d.MediaType { - case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: - // We don't care to support schema 1 images: - // https://github.com/google/go-containerregistry/issues/377 - return nil, newErrSchema1(d.MediaType) - case types.OCIImageIndex, types.DockerManifestList: - // We want an image but the registry has an index, resolve it to an image. - return d.remoteIndex().imageByPlatform(d.platform) - case types.OCIManifestSchema1, types.DockerManifestSchema2: - // These are expected. Enumerated here to allow a default case. - default: - // We could just return an error here, but some registries (e.g. static - // registries) don't set the Content-Type headers correctly, so instead... - logs.Warn.Printf("Unexpected media type for Image(): %s", d.MediaType) - } - - // Wrap the v1.Layers returned by this v1.Image in a hint for downstream - // remote.Write calls to facilitate cross-repo "mounting". - imgCore, err := partial.CompressedToImage(d.remoteImage()) - if err != nil { - return nil, err - } - return &mountableImage{ - Image: imgCore, - Reference: d.ref, - }, nil -} - -// Schema1 converts the Descriptor into a v1.Image for v2 schema 1 media types. -// -// The v1.Image returned by this method does not implement the entire interface because it would be inefficient. -// This exists mostly to make it easier to copy schema 1 images around or look at their filesystems. -// This is separate from Image() to avoid a backward incompatible change for callers expecting ErrSchema1. -func (d *Descriptor) Schema1() (v1.Image, error) { - i := &schema1{ - ref: d.ref, - fetcher: d.fetcher, - ctx: d.ctx, - manifest: d.Manifest, - mediaType: d.MediaType, - descriptor: &d.Descriptor, - } - - return &mountableImage{ - Image: i, - Reference: d.ref, - }, nil -} - -// ImageIndex converts the Descriptor into a v1.ImageIndex. -func (d *Descriptor) ImageIndex() (v1.ImageIndex, error) { - switch d.MediaType { - case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: - // We don't care to support schema 1 images: - // https://github.com/google/go-containerregistry/issues/377 - return nil, newErrSchema1(d.MediaType) - case types.OCIManifestSchema1, types.DockerManifestSchema2: - // We want an index but the registry has an image, nothing we can do. - return nil, fmt.Errorf("unexpected media type for ImageIndex(): %s; call Image() instead", d.MediaType) - case types.OCIImageIndex, types.DockerManifestList: - // These are expected. - default: - // We could just return an error here, but some registries (e.g. static - // registries) don't set the Content-Type headers correctly, so instead... 
- logs.Warn.Printf("Unexpected media type for ImageIndex(): %s", d.MediaType) - } - return d.remoteIndex(), nil -} - -func (d *Descriptor) remoteImage() *remoteImage { - return &remoteImage{ - ref: d.ref, - ctx: d.ctx, - fetcher: d.fetcher, - manifest: d.Manifest, - mediaType: d.MediaType, - descriptor: &d.Descriptor, - } -} - -func (d *Descriptor) remoteIndex() *remoteIndex { - return &remoteIndex{ - ref: d.ref, - ctx: d.ctx, - fetcher: d.fetcher, - manifest: d.Manifest, - mediaType: d.MediaType, - descriptor: &d.Descriptor, - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go deleted file mode 100644 index 846ba07cda..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package remote provides facilities for reading/writing v1.Images from/to -// a remote image registry. -package remote diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go deleted file mode 100644 index 4e61002bea..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2023 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "net/url" - "strings" - - "github.com/google/go-containerregistry/internal/redact" - "github.com/google/go-containerregistry/internal/verify" - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -const ( - kib = 1024 - mib = 1024 * kib - manifestLimit = 100 * mib -) - -// fetcher implements methods for reading from a registry. 
-type fetcher struct { - target resource - client *http.Client -} - -func makeFetcher(ctx context.Context, target resource, o *options) (*fetcher, error) { - auth := o.auth - if o.keychain != nil { - kauth, err := o.keychain.Resolve(target) - if err != nil { - return nil, err - } - auth = kauth - } - - reg, ok := target.(name.Registry) - if !ok { - repo, ok := target.(name.Repository) - if !ok { - return nil, fmt.Errorf("unexpected resource: %T", target) - } - reg = repo.Registry - } - - tr, err := transport.NewWithContext(ctx, reg, auth, o.transport, []string{target.Scope(transport.PullScope)}) - if err != nil { - return nil, err - } - return &fetcher{ - target: target, - client: &http.Client{Transport: tr}, - }, nil -} - -func (f *fetcher) Do(req *http.Request) (*http.Response, error) { - return f.client.Do(req) -} - -type resource interface { - Scheme() string - RegistryStr() string - Scope(string) string - - authn.Resource -} - -// url returns a url.URL for the specified path in the context of this remote image reference. -func (f *fetcher) url(resource, identifier string) url.URL { - u := url.URL{ - Scheme: f.target.Scheme(), - Host: f.target.RegistryStr(), - // Default path if this is not a repository. - Path: "/v2/_catalog", - } - if repo, ok := f.target.(name.Repository); ok { - u.Path = fmt.Sprintf("/v2/%s/%s/%s", repo.RepositoryStr(), resource, identifier) - } - return u -} - -func (f *fetcher) get(ctx context.Context, ref name.Reference, acceptable []types.MediaType, platform v1.Platform) (*Descriptor, error) { - b, desc, err := f.fetchManifest(ctx, ref, acceptable) - if err != nil { - return nil, err - } - return &Descriptor{ - ref: ref, - ctx: ctx, - fetcher: *f, - Manifest: b, - Descriptor: *desc, - platform: platform, - }, nil -} - -func (f *fetcher) fetchManifest(ctx context.Context, ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) { - u := f.url("manifests", ref.Identifier()) - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, nil, err - } - accept := []string{} - for _, mt := range acceptable { - accept = append(accept, string(mt)) - } - req.Header.Set("Accept", strings.Join(accept, ",")) - - resp, err := f.client.Do(req.WithContext(ctx)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, nil, err - } - - manifest, err := io.ReadAll(io.LimitReader(resp.Body, manifestLimit)) - if err != nil { - return nil, nil, err - } - - digest, size, err := v1.SHA256(bytes.NewReader(manifest)) - if err != nil { - return nil, nil, err - } - - mediaType := types.MediaType(resp.Header.Get("Content-Type")) - contentDigest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest")) - if err == nil && mediaType == types.DockerManifestSchema1Signed { - // If we can parse the digest from the header, and it's a signed schema 1 - // manifest, let's use that for the digest to appease older registries. - digest = contentDigest - } - - // Validate the digest matches what we asked for, if pulling by digest. - if dgst, ok := ref.(name.Digest); ok { - if digest.String() != dgst.DigestStr() { - return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), ref) - } - } - - var artifactType string - mf, _ := v1.ParseManifest(bytes.NewReader(manifest)) - // Failing to parse as a manifest should just be ignored. - // The manifest might not be valid, and that's okay. 
- if mf != nil && !mf.Config.MediaType.IsConfig() { - artifactType = string(mf.Config.MediaType) - } - - // Do nothing for tags; I give up. - // - // We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry, - // but so many registries implement this incorrectly that it's not worth checking. - // - // For reference: - // https://github.com/GoogleContainerTools/kaniko/issues/298 - - // Return all this info since we have to calculate it anyway. - desc := v1.Descriptor{ - Digest: digest, - Size: size, - MediaType: mediaType, - ArtifactType: artifactType, - } - - return manifest, &desc, nil -} - -func (f *fetcher) headManifest(ctx context.Context, ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) { - u := f.url("manifests", ref.Identifier()) - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return nil, err - } - accept := []string{} - for _, mt := range acceptable { - accept = append(accept, string(mt)) - } - req.Header.Set("Accept", strings.Join(accept, ",")) - - resp, err := f.client.Do(req.WithContext(ctx)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - mth := resp.Header.Get("Content-Type") - if mth == "" { - return nil, fmt.Errorf("HEAD %s: response did not include Content-Type header", u.String()) - } - mediaType := types.MediaType(mth) - - size := resp.ContentLength - if size == -1 { - return nil, fmt.Errorf("HEAD %s: response did not include Content-Length header", u.String()) - } - - dh := resp.Header.Get("Docker-Content-Digest") - if dh == "" { - return nil, fmt.Errorf("HEAD %s: response did not include Docker-Content-Digest header", u.String()) - } - digest, err := v1.NewHash(dh) - if err != nil { - return nil, err - } - - // Validate the digest matches what we asked for, if pulling by digest. - if dgst, ok := ref.(name.Digest); ok { - if digest.String() != dgst.DigestStr() { - return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), ref) - } - } - - // Return all this info since we have to calculate it anyway. - return &v1.Descriptor{ - Digest: digest, - Size: size, - MediaType: mediaType, - }, nil -} - -func (f *fetcher) fetchBlob(ctx context.Context, size int64, h v1.Hash) (io.ReadCloser, error) { - u := f.url("blobs", h.String()) - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - - resp, err := f.client.Do(req.WithContext(ctx)) - if err != nil { - return nil, redact.Error(err) - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - resp.Body.Close() - return nil, err - } - - // Do whatever we can. - // If we have an expected size and Content-Length doesn't match, return an error. - // If we don't have an expected size and we do have a Content-Length, use Content-Length. 
- if hsize := resp.ContentLength; hsize != -1 { - if size == verify.SizeUnknown { - size = hsize - } else if hsize != size { - return nil, fmt.Errorf("GET %s: Content-Length header %d does not match expected size %d", u.String(), hsize, size) - } - } - - return verify.ReadCloser(resp.Body, size, h) -} - -func (f *fetcher) headBlob(ctx context.Context, h v1.Hash) (*http.Response, error) { - u := f.url("blobs", h.String()) - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return nil, err - } - - resp, err := f.client.Do(req.WithContext(ctx)) - if err != nil { - return nil, redact.Error(err) - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - resp.Body.Close() - return nil, err - } - - return resp, nil -} - -func (f *fetcher) blobExists(ctx context.Context, h v1.Hash) (bool, error) { - u := f.url("blobs", h.String()) - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return false, err - } - - resp, err := f.client.Do(req.WithContext(ctx)) - if err != nil { - return false, redact.Error(err) - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { - return false, err - } - - return resp.StatusCode == http.StatusOK, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go deleted file mode 100644 index f085967ed5..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "bytes" - "context" - "io" - "net/http" - "net/url" - "sync" - - "github.com/google/go-containerregistry/internal/redact" - "github.com/google/go-containerregistry/internal/verify" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -var acceptableImageMediaTypes = []types.MediaType{ - types.DockerManifestSchema2, - types.OCIManifestSchema1, -} - -// remoteImage accesses an image from a remote registry -type remoteImage struct { - fetcher fetcher - ref name.Reference - ctx context.Context - manifestLock sync.Mutex // Protects manifest - manifest []byte - configLock sync.Mutex // Protects config - config []byte - mediaType types.MediaType - descriptor *v1.Descriptor -} - -func (r *remoteImage) ArtifactType() (string, error) { - // kind of a hack, but RawManifest does appropriate locking/memoization - // and makes sure r.descriptor is populated. 
- if _, err := r.RawManifest(); err != nil { - return "", err - } - return r.descriptor.ArtifactType, nil -} - -var _ partial.CompressedImageCore = (*remoteImage)(nil) - -// Image provides access to a remote image reference. -func Image(ref name.Reference, options ...Option) (v1.Image, error) { - desc, err := Get(ref, options...) - if err != nil { - return nil, err - } - - return desc.Image() -} - -func (r *remoteImage) MediaType() (types.MediaType, error) { - if string(r.mediaType) != "" { - return r.mediaType, nil - } - return types.DockerManifestSchema2, nil -} - -func (r *remoteImage) RawManifest() ([]byte, error) { - r.manifestLock.Lock() - defer r.manifestLock.Unlock() - if r.manifest != nil { - return r.manifest, nil - } - - // NOTE(jonjohnsonjr): We should never get here because the public entrypoints - // do type-checking via remote.Descriptor. I've left this here for tests that - // directly instantiate a remoteImage. - manifest, desc, err := r.fetcher.fetchManifest(r.ctx, r.ref, acceptableImageMediaTypes) - if err != nil { - return nil, err - } - - if r.descriptor == nil { - r.descriptor = desc - } - r.mediaType = desc.MediaType - r.manifest = manifest - return r.manifest, nil -} - -func (r *remoteImage) RawConfigFile() ([]byte, error) { - r.configLock.Lock() - defer r.configLock.Unlock() - if r.config != nil { - return r.config, nil - } - - m, err := partial.Manifest(r) - if err != nil { - return nil, err - } - - if m.Config.Data != nil { - if err := verify.Descriptor(m.Config); err != nil { - return nil, err - } - r.config = m.Config.Data - return r.config, nil - } - - body, err := r.fetcher.fetchBlob(r.ctx, m.Config.Size, m.Config.Digest) - if err != nil { - return nil, err - } - defer body.Close() - - r.config, err = io.ReadAll(body) - if err != nil { - return nil, err - } - return r.config, nil -} - -// Descriptor retains the original descriptor from an index manifest. -// See partial.Descriptor. -func (r *remoteImage) Descriptor() (*v1.Descriptor, error) { - // kind of a hack, but RawManifest does appropriate locking/memoization - // and makes sure r.descriptor is populated. - _, err := r.RawManifest() - return r.descriptor, err -} - -func (r *remoteImage) ConfigLayer() (v1.Layer, error) { - if _, err := r.RawManifest(); err != nil { - return nil, err - } - m, err := partial.Manifest(r) - if err != nil { - return nil, err - } - - return partial.CompressedToLayer(&remoteImageLayer{ - ri: r, - ctx: r.ctx, - digest: m.Config.Digest, - }) -} - -// remoteImageLayer implements partial.CompressedLayer -type remoteImageLayer struct { - ri *remoteImage - ctx context.Context - digest v1.Hash -} - -// Digest implements partial.CompressedLayer -func (rl *remoteImageLayer) Digest() (v1.Hash, error) { - return rl.digest, nil -} - -// Compressed implements partial.CompressedLayer -func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) { - urls := []url.URL{rl.ri.fetcher.url("blobs", rl.digest.String())} - - // Add alternative layer sources from URLs (usually none). - d, err := partial.BlobDescriptor(rl, rl.digest) - if err != nil { - return nil, err - } - - if d.Data != nil { - return verify.ReadCloser(io.NopCloser(bytes.NewReader(d.Data)), d.Size, d.Digest) - } - - // We don't want to log binary layers -- this can break terminals. 
- ctx := redact.NewContext(rl.ctx, "omitting binary blobs from logs") - - for _, s := range d.URLs { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - urls = append(urls, *u) - } - - // The lastErr for most pulls will be the same (the first error), but for - // foreign layers we'll want to surface the last one, since we try to pull - // from the registry first, which would often fail. - // TODO: Maybe we don't want to try pulling from the registry first? - var lastErr error - for _, u := range urls { - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - - resp, err := rl.ri.fetcher.Do(req.WithContext(ctx)) - if err != nil { - lastErr = err - continue - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - resp.Body.Close() - lastErr = err - continue - } - - return verify.ReadCloser(resp.Body, d.Size, rl.digest) - } - - return nil, lastErr -} - -// Manifest implements partial.WithManifest so that we can use partial.BlobSize below. -func (rl *remoteImageLayer) Manifest() (*v1.Manifest, error) { - return partial.Manifest(rl.ri) -} - -// MediaType implements v1.Layer -func (rl *remoteImageLayer) MediaType() (types.MediaType, error) { - bd, err := partial.BlobDescriptor(rl, rl.digest) - if err != nil { - return "", err - } - - return bd.MediaType, nil -} - -// Size implements partial.CompressedLayer -func (rl *remoteImageLayer) Size() (int64, error) { - // Look up the size of this digest in the manifest to avoid a request. - return partial.BlobSize(rl, rl.digest) -} - -// ConfigFile implements partial.WithManifestAndConfigFile so that we can use partial.BlobToDiffID below. -func (rl *remoteImageLayer) ConfigFile() (*v1.ConfigFile, error) { - return partial.ConfigFile(rl.ri) -} - -// DiffID implements partial.WithDiffID so that we don't recompute a DiffID that we already have -// available in our ConfigFile. -func (rl *remoteImageLayer) DiffID() (v1.Hash, error) { - return partial.BlobToDiffID(rl, rl.digest) -} - -// Descriptor retains the original descriptor from an image manifest. -// See partial.Descriptor. -func (rl *remoteImageLayer) Descriptor() (*v1.Descriptor, error) { - return partial.BlobDescriptor(rl, rl.digest) -} - -// See partial.Exists. -func (rl *remoteImageLayer) Exists() (bool, error) { - return rl.ri.fetcher.blobExists(rl.ri.ctx, rl.digest) -} - -// LayerByDigest implements partial.CompressedLayer -func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { - return &remoteImageLayer{ - ri: r, - ctx: r.ctx, - digest: h, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go deleted file mode 100644 index b80972c80e..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
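A sketch of resolving a multi-platform reference, assuming the upstream API: `remote.WithPlatform` drives the `childByPlatform` selection implemented in the index code below.

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("gcr.io/google-containers/pause")
	if err != nil {
		panic(err)
	}
	// If ref points at an index, the child whose platform matches is
	// resolved to an image; a child with no platform is treated as
	// amd64/linux.
	img, err := remote.Image(ref, remote.WithPlatform(v1.Platform{
		OS:           "linux",
		Architecture: "arm64",
	}))
	if err != nil {
		panic(err)
	}
	d, err := img.Digest()
	if err != nil {
		panic(err)
	}
	fmt.Println(d)
}
```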
- -package remote - -import ( - "bytes" - "context" - "fmt" - "sync" - - "github.com/google/go-containerregistry/internal/verify" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -var acceptableIndexMediaTypes = []types.MediaType{ - types.DockerManifestList, - types.OCIImageIndex, -} - -// remoteIndex accesses an index from a remote registry -type remoteIndex struct { - fetcher fetcher - ref name.Reference - ctx context.Context - manifestLock sync.Mutex // Protects manifest - manifest []byte - mediaType types.MediaType - descriptor *v1.Descriptor -} - -// Index provides access to a remote index reference. -func Index(ref name.Reference, options ...Option) (v1.ImageIndex, error) { - desc, err := get(ref, acceptableIndexMediaTypes, options...) - if err != nil { - return nil, err - } - - return desc.ImageIndex() -} - -func (r *remoteIndex) MediaType() (types.MediaType, error) { - if string(r.mediaType) != "" { - return r.mediaType, nil - } - return types.DockerManifestList, nil -} - -func (r *remoteIndex) Digest() (v1.Hash, error) { - return partial.Digest(r) -} - -func (r *remoteIndex) Size() (int64, error) { - return partial.Size(r) -} - -func (r *remoteIndex) RawManifest() ([]byte, error) { - r.manifestLock.Lock() - defer r.manifestLock.Unlock() - if r.manifest != nil { - return r.manifest, nil - } - - // NOTE(jonjohnsonjr): We should never get here because the public entrypoints - // do type-checking via remote.Descriptor. I've left this here for tests that - // directly instantiate a remoteIndex. - manifest, desc, err := r.fetcher.fetchManifest(r.ctx, r.ref, acceptableIndexMediaTypes) - if err != nil { - return nil, err - } - - if r.descriptor == nil { - r.descriptor = desc - } - r.mediaType = desc.MediaType - r.manifest = manifest - return r.manifest, nil -} - -func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) { - b, err := r.RawManifest() - if err != nil { - return nil, err - } - return v1.ParseIndexManifest(bytes.NewReader(b)) -} - -func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) { - desc, err := r.childByHash(h) - if err != nil { - return nil, err - } - - // Descriptor.Image will handle coercing nested indexes into an Image. - return desc.Image() -} - -// Descriptor retains the original descriptor from an index manifest. -// See partial.Descriptor. -func (r *remoteIndex) Descriptor() (*v1.Descriptor, error) { - // kind of a hack, but RawManifest does appropriate locking/memoization - // and makes sure r.descriptor is populated. - _, err := r.RawManifest() - return r.descriptor, err -} - -func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { - desc, err := r.childByHash(h) - if err != nil { - return nil, err - } - return desc.ImageIndex() -} - -// Workaround for #819. 
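The index resolution below is what `remote.Image` leans on when a reference points at a manifest list. A minimal sketch, assuming a hypothetical multi-platform tag, of resolving it to a single child image:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("gcr.io/example/app:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}

	// remote.Image coerces an index into a child image; the default
	// platform is amd64/linux unless overridden.
	img, err := remote.Image(ref, remote.WithPlatform(v1.Platform{
		OS:           "linux",
		Architecture: "arm64",
	}))
	if err != nil {
		log.Fatal(err)
	}

	digest, err := img.Digest()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("resolved child image:", digest)
}
```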
-func (r *remoteIndex) Layer(h v1.Hash) (v1.Layer, error) { - index, err := r.IndexManifest() - if err != nil { - return nil, err - } - for _, childDesc := range index.Manifests { - if h == childDesc.Digest { - l, err := partial.CompressedToLayer(&remoteLayer{ - fetcher: r.fetcher, - ctx: r.ctx, - digest: h, - }) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: r.ref.Context().Digest(h.String()), - }, nil - } - } - return nil, fmt.Errorf("layer not found: %s", h) -} - -func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) { - desc, err := r.childByPlatform(platform) - if err != nil { - return nil, err - } - - // Descriptor.Image will handle coercing nested indexes into an Image. - return desc.Image() -} - -// This naively matches the first manifest with matching platform attributes. -// -// We should probably use this instead: -// -// github.com/containerd/containerd/platforms -// -// But first we'd need to migrate to: -// -// github.com/opencontainers/image-spec/specs-go/v1 -func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error) { - index, err := r.IndexManifest() - if err != nil { - return nil, err - } - for _, childDesc := range index.Manifests { - // If platform is missing from child descriptor, assume it's amd64/linux. - p := defaultPlatform - if childDesc.Platform != nil { - p = *childDesc.Platform - } - - if matchesPlatform(p, platform) { - return r.childDescriptor(childDesc, platform) - } - } - return nil, fmt.Errorf("no child with platform %+v in index %s", platform, r.ref) -} - -func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) { - index, err := r.IndexManifest() - if err != nil { - return nil, err - } - for _, childDesc := range index.Manifests { - if h == childDesc.Digest { - return r.childDescriptor(childDesc, defaultPlatform) - } - } - return nil, fmt.Errorf("no child with digest %s in index %s", h, r.ref) -} - -// Convert one of this index's child's v1.Descriptor into a remote.Descriptor, with the given platform option. -func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) (*Descriptor, error) { - ref := r.ref.Context().Digest(child.Digest.String()) - var ( - manifest []byte - err error - ) - if child.Data != nil { - if err := verify.Descriptor(child); err != nil { - return nil, err - } - manifest = child.Data - } else { - manifest, _, err = r.fetcher.fetchManifest(r.ctx, ref, []types.MediaType{child.MediaType}) - if err != nil { - return nil, err - } - } - - if child.MediaType.IsImage() { - mf, _ := v1.ParseManifest(bytes.NewReader(manifest)) - // Failing to parse as a manifest should just be ignored. - // The manifest might not be valid, and that's okay. - if mf != nil && !mf.Config.MediaType.IsConfig() { - child.ArtifactType = string(mf.Config.MediaType) - } - } - - return &Descriptor{ - ref: ref, - ctx: r.ctx, - fetcher: r.fetcher, - Manifest: manifest, - Descriptor: child, - platform: platform, - }, nil -} - -// matchesPlatform checks if the given platform matches the required platforms. -// The given platform matches the required platform if -// - architecture and OS are identical. -// - OS version and variant are identical if provided. -// - features and OS features of the required platform are subsets of those of the given platform. -func matchesPlatform(given, required v1.Platform) bool { - // Required fields that must be identical. 
- if given.Architecture != required.Architecture || given.OS != required.OS { - return false - } - - // Optional fields that may be empty, but must be identical if provided. - if required.OSVersion != "" && given.OSVersion != required.OSVersion { - return false - } - if required.Variant != "" && given.Variant != required.Variant { - return false - } - - // Verify required platform's features are a subset of given platform's features. - if !isSubset(given.OSFeatures, required.OSFeatures) { - return false - } - if !isSubset(given.Features, required.Features) { - return false - } - - return true -} - -// isSubset checks if the required array of strings is a subset of the given lst. -func isSubset(lst, required []string) bool { - set := make(map[string]bool) - for _, value := range lst { - set[value] = true - } - - for _, value := range required { - if _, ok := set[value]; !ok { - return false - } - } - - return true -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go deleted file mode 100644 index 39c2059502..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "context" - "io" - - "github.com/google/go-containerregistry/internal/redact" - "github.com/google/go-containerregistry/internal/verify" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// remoteLayer implements partial.CompressedLayer -type remoteLayer struct { - ctx context.Context - fetcher fetcher - digest v1.Hash -} - -// Compressed implements partial.CompressedLayer -func (rl *remoteLayer) Compressed() (io.ReadCloser, error) { - // We don't want to log binary layers -- this can break terminals. - ctx := redact.NewContext(rl.ctx, "omitting binary blobs from logs") - return rl.fetcher.fetchBlob(ctx, verify.SizeUnknown, rl.digest) -} - -// Size implements partial.CompressedLayer -func (rl *remoteLayer) Size() (int64, error) { - resp, err := rl.fetcher.headBlob(rl.ctx, rl.digest) - if err != nil { - return -1, err - } - defer resp.Body.Close() - return resp.ContentLength, nil -} - -// Digest implements partial.CompressedLayer -func (rl *remoteLayer) Digest() (v1.Hash, error) { - return rl.digest, nil -} - -// MediaType implements v1.Layer -func (rl *remoteLayer) MediaType() (types.MediaType, error) { - return types.DockerLayer, nil -} - -// See partial.Exists. -func (rl *remoteLayer) Exists() (bool, error) { - return rl.fetcher.blobExists(rl.ctx, rl.digest) -} - -// Layer reads the given blob reference from a registry as a Layer.
A blob -// reference here is just a punned name.Digest where the digest portion is the -// digest of the blob to be read and the repository portion is the repo where -// that blob lives. -func Layer(ref name.Digest, options ...Option) (v1.Layer, error) { - o, err := makeOptions(options...) - if err != nil { - return nil, err - } - return newPuller(o).Layer(o.context, ref) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go deleted file mode 100644 index 910d2a94c7..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -// ListWithContext calls List with the given context. -// -// Deprecated: Use List and WithContext. This will be removed in a future release. -func ListWithContext(ctx context.Context, repo name.Repository, options ...Option) ([]string, error) { - return List(repo, append(options, WithContext(ctx))...) -} - -// List calls /tags/list for the given repository, returning the list of tags -// in the "tags" property. -func List(repo name.Repository, options ...Option) ([]string, error) { - o, err := makeOptions(options...) - if err != nil { - return nil, err - } - return newPuller(o).List(o.context, repo) -} - -type Tags struct { - Name string `json:"name"` - Tags []string `json:"tags"` - Next string `json:"next,omitempty"` -} - -func (f *fetcher) listPage(ctx context.Context, repo name.Repository, next string, pageSize int) (*Tags, error) { - if next == "" { - uri := &url.URL{ - Scheme: repo.Scheme(), - Host: repo.RegistryStr(), - Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()), - } - if pageSize > 0 { - uri.RawQuery = fmt.Sprintf("n=%d", pageSize) - } - next = uri.String() - } - - req, err := http.NewRequestWithContext(ctx, "GET", next, nil) - if err != nil { - return nil, err - } - - resp, err := f.client.Do(req) - if err != nil { - return nil, err - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - parsed := Tags{} - if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { - return nil, err - } - - if err := resp.Body.Close(); err != nil { - return nil, err - } - - uri, err := getNextPageURL(resp) - if err != nil { - return nil, err - } - - if uri != nil { - parsed.Next = uri.String() - } - - return &parsed, nil -} - -// getNextPageURL checks if there is a Link header in a http.Response which -// contains a link to the next page. If yes it returns the url.URL of the next -// page otherwise it returns nil. 
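Most callers just want `List`, but the `Lister` (below) exposes the page-by-page flow that `listPage` and the Link-header parsing implement. A sketch with a hypothetical repository:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	repo, err := name.NewRepository("gcr.io/example/app") // hypothetical
	if err != nil {
		log.Fatal(err)
	}

	puller, err := remote.NewPuller()
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	lister, err := puller.Lister(ctx, repo)
	if err != nil {
		log.Fatal(err)
	}

	// Walk the paginated /tags/list responses one page at a time.
	for lister.HasNext() {
		tags, err := lister.Next(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, t := range tags.Tags {
			fmt.Println(t)
		}
	}
}
```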
-func getNextPageURL(resp *http.Response) (*url.URL, error) { - link := resp.Header.Get("Link") - if link == "" { - return nil, nil - } - - if link[0] != '<' { - return nil, fmt.Errorf("failed to parse link header: missing '<' in: %s", link) - } - - end := strings.Index(link, ">") - if end == -1 { - return nil, fmt.Errorf("failed to parse link header: missing '>' in: %s", link) - } - link = link[1:end] - - linkURL, err := url.Parse(link) - if err != nil { - return nil, err - } - if resp.Request == nil || resp.Request.URL == nil { - return nil, nil - } - linkURL = resp.Request.URL.ResolveReference(linkURL) - return linkURL, nil -} - -type Lister struct { - f *fetcher - repo name.Repository - pageSize int - - page *Tags - err error - - needMore bool -} - -func (l *Lister) Next(ctx context.Context) (*Tags, error) { - if l.needMore { - l.page, l.err = l.f.listPage(ctx, l.repo, l.page.Next, l.pageSize) - } else { - l.needMore = true - } - return l.page, l.err -} - -func (l *Lister) HasNext() bool { - return l.page != nil && (!l.needMore || l.page.Next != "") -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go deleted file mode 100644 index 36d088567d..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" -) - -// MountableLayer wraps a v1.Layer in a shim that enables the layer to be -// "mounted" when published to another registry. -type MountableLayer struct { - v1.Layer - - Reference name.Reference -} - -// Descriptor retains the original descriptor from an image manifest. -// See partial.Descriptor. -func (ml *MountableLayer) Descriptor() (*v1.Descriptor, error) { - return partial.Descriptor(ml.Layer) -} - -// Exists is a hack. See partial.Exists. -func (ml *MountableLayer) Exists() (bool, error) { - return partial.Exists(ml.Layer) -} - -// mountableImage wraps the v1.Layer references returned by the embedded v1.Image -// in MountableLayer's so that remote.Write might attempt to mount them from their -// source repository. 
-type mountableImage struct { - v1.Image - - Reference name.Reference -} - -// Layers implements v1.Image -func (mi *mountableImage) Layers() ([]v1.Layer, error) { - ls, err := mi.Image.Layers() - if err != nil { - return nil, err - } - mls := make([]v1.Layer, 0, len(ls)) - for _, l := range ls { - mls = append(mls, &MountableLayer{ - Layer: l, - Reference: mi.Reference, - }) - } - return mls, nil -} - -// LayerByDigest implements v1.Image -func (mi *mountableImage) LayerByDigest(d v1.Hash) (v1.Layer, error) { - l, err := mi.Image.LayerByDigest(d) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: mi.Reference, - }, nil -} - -// LayerByDiffID implements v1.Image -func (mi *mountableImage) LayerByDiffID(d v1.Hash) (v1.Layer, error) { - l, err := mi.Image.LayerByDiffID(d) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: mi.Reference, - }, nil -} - -// Descriptor retains the original descriptor from an index manifest. -// See partial.Descriptor. -func (mi *mountableImage) Descriptor() (*v1.Descriptor, error) { - return partial.Descriptor(mi.Image) -} - -// ConfigLayer retains the original reference so that it can be mounted. -// See partial.ConfigLayer. -func (mi *mountableImage) ConfigLayer() (v1.Layer, error) { - l, err := partial.ConfigLayer(mi.Image) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: mi.Reference, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go deleted file mode 100644 index a6705de895..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "github.com/google/go-containerregistry/pkg/name" - "golang.org/x/sync/errgroup" -) - -// MultiWrite writes the given Images or ImageIndexes to the given refs, as -// efficiently as possible, by deduping shared layer blobs while uploading them -// in parallel. -func MultiWrite(todo map[name.Reference]Taggable, options ...Option) (rerr error) { - o, err := makeOptions(options...) 
- if err != nil { - return err - } - if o.progress != nil { - defer func() { o.progress.Close(rerr) }() - } - p := newPusher(o) - - g, ctx := errgroup.WithContext(o.context) - g.SetLimit(o.jobs) - - for ref, t := range todo { - ref, t := ref, t - g.Go(func() error { - return p.Push(ctx, ref, t) - }) - } - - return g.Wait() -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go deleted file mode 100644 index 99a2bb2eb2..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "context" - "errors" - "io" - "net" - "net/http" - "syscall" - "time" - - "github.com/google/go-containerregistry/internal/retry" - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/logs" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -// Option is a functional option for remote operations. -type Option func(*options) error - -type options struct { - auth authn.Authenticator - keychain authn.Keychain - transport http.RoundTripper - context context.Context - jobs int - userAgent string - allowNondistributableArtifacts bool - progress *progress - retryBackoff Backoff - retryPredicate retry.Predicate - retryStatusCodes []int - - // Only these options can overwrite Reuse()d options. - platform v1.Platform - pageSize int - filter map[string]string - - // Set by Reuse, we currently store one or the other. - puller *Puller - pusher *Pusher -} - -var defaultPlatform = v1.Platform{ - Architecture: "amd64", - OS: "linux", -} - -// Backoff is an alias of retry.Backoff to expose this configuration option to consumers of this lib -type Backoff = retry.Backoff - -var defaultRetryPredicate retry.Predicate = func(err error) bool { - // Various failure modes here, as we're often reading from and writing to - // the network. - if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) || errors.Is(err, net.ErrClosed) { - logs.Warn.Printf("retrying %v", err) - return true - } - return false -} - -// Try this three times, waiting 1s after first failure, 3s after second. 
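With Duration 1s, Factor 3.0, and Steps 3, that works out to waits of roughly 1s and then 3s (plus up to 10% jitter) between the three attempts. A hedged sketch of substituting a more patient schedule through the `WithRetryBackoff` option defined later in this file (the reference is hypothetical):

```go
package main

import (
	"log"
	"time"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("gcr.io/example/app:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}

	// Five attempts, starting at 2s and roughly tripling, with the same
	// 10% jitter as the default backoff.
	backoff := remote.Backoff{
		Duration: 2 * time.Second,
		Factor:   3.0,
		Jitter:   0.1,
		Steps:    5,
	}

	if _, err := remote.Get(ref, remote.WithRetryBackoff(backoff)); err != nil {
		log.Fatal(err)
	}
}
```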
-var defaultRetryBackoff = Backoff{ - Duration: 1.0 * time.Second, - Factor: 3.0, - Jitter: 0.1, - Steps: 3, -} - -// Useful for tests -var fastBackoff = Backoff{ - Duration: 1.0 * time.Millisecond, - Factor: 3.0, - Jitter: 0.1, - Steps: 3, -} - -var defaultRetryStatusCodes = []int{ - http.StatusRequestTimeout, - http.StatusInternalServerError, - http.StatusBadGateway, - http.StatusServiceUnavailable, - http.StatusGatewayTimeout, - 499, // nginx-specific, client closed request - 522, // Cloudflare-specific, connection timeout -} - -const ( - defaultJobs = 4 - - // ECR returns an error if n > 1000: - // https://github.com/google/go-containerregistry/issues/1091 - defaultPageSize = 1000 -) - -// DefaultTransport is based on http.DefaultTransport with modifications -// documented inline below. -var DefaultTransport http.RoundTripper = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - // We usually are dealing with 2 hosts (at most), split MaxIdleConns between them. - MaxIdleConnsPerHost: 50, -} - -func makeOptions(opts ...Option) (*options, error) { - o := &options{ - transport: DefaultTransport, - platform: defaultPlatform, - context: context.Background(), - jobs: defaultJobs, - pageSize: defaultPageSize, - retryPredicate: defaultRetryPredicate, - retryBackoff: defaultRetryBackoff, - retryStatusCodes: defaultRetryStatusCodes, - } - - for _, option := range opts { - if err := option(o); err != nil { - return nil, err - } - } - - switch { - case o.auth != nil && o.keychain != nil: - // It is a better experience to explicitly tell a caller their auth is misconfigured - // than potentially fail silently when the correct auth is overridden by option misuse. - return nil, errors.New("provide an option for either authn.Authenticator or authn.Keychain, not both") - case o.auth == nil: - o.auth = authn.Anonymous - } - - // transport.Wrapper is a signal that consumers are opt-ing into providing their own transport without any additional wrapping. - // This is to allow consumers full control over the transports logic, such as providing retry logic. - if _, ok := o.transport.(*transport.Wrapper); !ok { - // Wrap the transport in something that logs requests and responses. - // It's expensive to generate the dumps, so skip it if we're writing - // to nothing. - if logs.Enabled(logs.Debug) { - o.transport = transport.NewLogger(o.transport) - } - - // Wrap the transport in something that can retry network flakes. - o.transport = transport.NewRetry(o.transport, transport.WithRetryPredicate(defaultRetryPredicate), transport.WithRetryStatusCodes(o.retryStatusCodes...)) - - // Wrap this last to prevent transport.New from double-wrapping. - if o.userAgent != "" { - o.transport = transport.NewUserAgent(o.transport, o.userAgent) - } - } - - return o, nil -} - -// WithTransport is a functional option for overriding the default transport -// for remote operations. -// If transport.Wrapper is provided, this signals that the consumer does *not* want any further wrapping to occur. -// i.e. logging, retry and useragent -// -// The default transport is DefaultTransport. 
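Anything that is not a transport.Wrapper still picks up the logging, retry, and User-Agent wrapping from makeOptions above. A sketch, under the assumption of a private registry with a self-signed certificate (the hostname is hypothetical), of passing a tweaked clone of DefaultTransport:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("registry.example.com/app:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}

	// Clone the package's DefaultTransport and relax TLS verification for
	// a self-signed registry cert (a sketch; avoid this in production).
	t := remote.DefaultTransport.(*http.Transport).Clone()
	t.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}

	// Because this is not a transport.Wrapper, makeOptions still adds the
	// logging, retry, and User-Agent wrapping around it.
	if _, err := remote.Get(ref, remote.WithTransport(t)); err != nil {
		log.Fatal(err)
	}
}
```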
-func WithTransport(t http.RoundTripper) Option { - return func(o *options) error { - o.transport = t - return nil - } -} - -// WithAuth is a functional option for overriding the default authenticator -// for remote operations. -// It is an error to use both WithAuth and WithAuthFromKeychain in the same Option set. -// -// The default authenticator is authn.Anonymous. -func WithAuth(auth authn.Authenticator) Option { - return func(o *options) error { - o.auth = auth - return nil - } -} - -// WithAuthFromKeychain is a functional option for overriding the default -// authenticator for remote operations, using an authn.Keychain to find -// credentials. -// It is an error to use both WithAuth and WithAuthFromKeychain in the same Option set. -// -// The default authenticator is authn.Anonymous. -func WithAuthFromKeychain(keys authn.Keychain) Option { - return func(o *options) error { - o.keychain = keys - return nil - } -} - -// WithPlatform is a functional option for overriding the default platform -// that Image and Descriptor.Image use for resolving an index to an image. -// -// The default platform is amd64/linux. -func WithPlatform(p v1.Platform) Option { - return func(o *options) error { - o.platform = p - return nil - } -} - -// WithContext is a functional option for setting the context in http requests -// performed by a given function. Note that this context is used for _all_ -// http requests, not just the initial volley. E.g., for remote.Image, the -// context will be set on http requests generated by subsequent calls to -// RawConfigFile() and even methods on layers returned by Layers(). -// -// The default context is context.Background(). -func WithContext(ctx context.Context) Option { - return func(o *options) error { - o.context = ctx - return nil - } -} - -// WithJobs is a functional option for setting the parallelism of remote -// operations performed by a given function. Note that not all remote -// operations support parallelism. -// -// The default value is 4. -func WithJobs(jobs int) Option { - return func(o *options) error { - if jobs <= 0 { - return errors.New("jobs must be greater than zero") - } - o.jobs = jobs - return nil - } -} - -// WithUserAgent adds the given string to the User-Agent header for any HTTP -// requests. This header will also include "go-containerregistry/${version}". -// -// If you want to completely overwrite the User-Agent header, use WithTransport. -func WithUserAgent(ua string) Option { - return func(o *options) error { - o.userAgent = ua - return nil - } -} - -// WithNondistributable includes non-distributable (foreign) layers -// when writing images, see: -// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers -// -// The default behaviour is to skip these layers -func WithNondistributable(o *options) error { - o.allowNondistributableArtifacts = true - return nil -} - -// WithProgress takes a channel that will receive progress updates as bytes are written. -// -// Sending updates to an unbuffered channel will block writes, so callers -// should provide a buffered channel to avoid potential deadlocks. -func WithProgress(updates chan<- v1.Update) Option { - return func(o *options) error { - o.progress = &progress{updates: updates} - o.progress.lastUpdate = &v1.Update{} - return nil - } -} - -// WithPageSize sets the given size as the value of parameter 'n' in the request. -// -// To omit the `n` parameter entirely, use WithPageSize(0). -// The default value is 1000. 
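The deadlock warning on WithProgress above is easy to trip in practice, since updates are sent synchronously. A minimal sketch of a push with a buffered channel; the registry reference is hypothetical and random.Image just fabricates a throwaway test image:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/random"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("registry.example.com/app:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}

	img, err := random.Image(1024, 1) // throwaway test image
	if err != nil {
		log.Fatal(err)
	}

	// Buffered, per the WithProgress doc comment, so a slow consumer
	// doesn't block the upload goroutines.
	updates := make(chan v1.Update, 100)
	go func() {
		for u := range updates {
			fmt.Printf("%d/%d bytes\n", u.Complete, u.Total)
		}
	}()

	// The channel is closed for us when the write finishes.
	if err := remote.Write(ref, img, remote.WithProgress(updates)); err != nil {
		log.Fatal(err)
	}
}
```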
-func WithPageSize(size int) Option { - return func(o *options) error { - o.pageSize = size - return nil - } -} - -// WithRetryBackoff sets the backoff used when retrying HTTP operations. -func WithRetryBackoff(backoff Backoff) Option { - return func(o *options) error { - o.retryBackoff = backoff - return nil - } -} - -// WithRetryPredicate sets the predicate used when retrying HTTP operations. -func WithRetryPredicate(predicate retry.Predicate) Option { - return func(o *options) error { - o.retryPredicate = predicate - return nil - } -} - -// WithRetryStatusCodes sets which http response codes will be retried. -func WithRetryStatusCodes(codes ...int) Option { - return func(o *options) error { - o.retryStatusCodes = codes - return nil - } -} - -// WithFilter sets the filter querystring for HTTP operations. -func WithFilter(key string, value string) Option { - return func(o *options) error { - if o.filter == nil { - o.filter = map[string]string{} - } - o.filter[key] = value - return nil - } -} - -// Reuse takes a Puller or Pusher and reuses it for remote interactions -// rather than starting from a clean slate. For example, it will reuse token exchanges -// when possible and avoid sending redundant HEAD requests. -// -// Reuse will take precedence over other options passed to most remote functions because -// most options deal with setting up auth and transports, which Reuse intentionally skips. -func Reuse[I *Puller | *Pusher](i I) Option { - return func(o *options) error { - if puller, ok := any(i).(*Puller); ok { - o.puller = puller - } else if pusher, ok := any(i).(*Pusher); ok { - o.pusher = pusher - } - return nil - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go deleted file mode 100644 index fe60c8c353..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2022 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "io" - "sync" - "sync/atomic" - - v1 "github.com/google/go-containerregistry/pkg/v1" -) - -type progress struct { - sync.Mutex - updates chan<- v1.Update - lastUpdate *v1.Update -} - -func (p *progress) total(delta int64) { - p.Lock() - defer p.Unlock() - atomic.AddInt64(&p.lastUpdate.Total, delta) -} - -func (p *progress) complete(delta int64) { - p.Lock() - defer p.Unlock() - p.updates <- v1.Update{ - Total: p.lastUpdate.Total, - Complete: atomic.AddInt64(&p.lastUpdate.Complete, delta), - } -} - -func (p *progress) err(err error) error { - if err != nil && p.updates != nil { - p.updates <- v1.Update{Error: err} - } - return err -} - -func (p *progress) Close(err error) { - _ = p.err(err) - close(p.updates) -} - -type progressReader struct { - rc io.ReadCloser - - count *int64 // number of bytes this reader has read, to support resetting on retry.
- progress *progress -} - -func (r *progressReader) Read(b []byte) (int, error) { - n, err := r.rc.Read(b) - if err != nil { - return n, err - } - atomic.AddInt64(r.count, int64(n)) - // TODO: warn/debug log if sending takes too long, or if sending is blocked while context is canceled. - r.progress.complete(int64(n)) - return n, nil -} - -func (r *progressReader) Close() error { return r.rc.Close() } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/puller.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/puller.go deleted file mode 100644 index 7da8017eec..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/puller.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2023 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "context" - "sync" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -type Puller struct { - o *options - - // map[resource]*reader - readers sync.Map -} - -func NewPuller(options ...Option) (*Puller, error) { - o, err := makeOptions(options...) - if err != nil { - return nil, err - } - - return newPuller(o), nil -} - -func newPuller(o *options) *Puller { - if o.puller != nil { - return o.puller - } - return &Puller{ - o: o, - } -} - -type reader struct { - // in - target resource - o *options - - // f() - once sync.Once - - // out - f *fetcher - err error -} - -// this will run once per reader instance -func (r *reader) init(ctx context.Context) error { - r.once.Do(func() { - r.f, r.err = makeFetcher(ctx, r.target, r.o) - }) - return r.err -} - -func (p *Puller) fetcher(ctx context.Context, target resource) (*fetcher, error) { - v, _ := p.readers.LoadOrStore(target, &reader{ - target: target, - o: p.o, - }) - rr := v.(*reader) - return rr.f, rr.init(ctx) -} - -// Head is like remote.Head, but avoids re-authenticating when possible. -func (p *Puller) Head(ctx context.Context, ref name.Reference) (*v1.Descriptor, error) { - f, err := p.fetcher(ctx, ref.Context()) - if err != nil { - return nil, err - } - - return f.headManifest(ctx, ref, allManifestMediaTypes) -} - -// Get is like remote.Get, but avoids re-authenticating when possible. -func (p *Puller) Get(ctx context.Context, ref name.Reference) (*Descriptor, error) { - return p.get(ctx, ref, allManifestMediaTypes, p.o.platform) -} - -func (p *Puller) get(ctx context.Context, ref name.Reference, acceptable []types.MediaType, platform v1.Platform) (*Descriptor, error) { - f, err := p.fetcher(ctx, ref.Context()) - if err != nil { - return nil, err - } - return f.get(ctx, ref, acceptable, platform) -} - -// Layer is like remote.Layer, but avoids re-authenticating when possible. 
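A sketch of what that amortization buys: one Puller serving several requests against the same (hypothetical) repository, reusing the cached fetcher and its token exchange rather than re-authenticating per call:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	puller, err := remote.NewPuller()
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	// Repeated calls against the same repository reuse the fetcher cached
	// in the Puller's readers map.
	for _, tag := range []string{"v1", "v2", "latest"} { // hypothetical tags
		ref, err := name.ParseReference("gcr.io/example/app:" + tag) // hypothetical repo
		if err != nil {
			log.Fatal(err)
		}
		desc, err := puller.Head(ctx, ref)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(tag, desc.Digest)
	}
}
```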
-func (p *Puller) Layer(ctx context.Context, ref name.Digest) (v1.Layer, error) { - f, err := p.fetcher(ctx, ref.Context()) - if err != nil { - return nil, err - } - - h, err := v1.NewHash(ref.Identifier()) - if err != nil { - return nil, err - } - l, err := partial.CompressedToLayer(&remoteLayer{ - fetcher: *f, - ctx: ctx, - digest: h, - }) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: ref, - }, nil -} - -// List lists tags in a repo and handles pagination, returning the full list of tags. -func (p *Puller) List(ctx context.Context, repo name.Repository) ([]string, error) { - lister, err := p.Lister(ctx, repo) - if err != nil { - return nil, err - } - - tagList := []string{} - for lister.HasNext() { - tags, err := lister.Next(ctx) - if err != nil { - return nil, err - } - tagList = append(tagList, tags.Tags...) - } - - return tagList, nil -} - -// Lister lists tags in a repo and returns a Lister for paginating through the results. -func (p *Puller) Lister(ctx context.Context, repo name.Repository) (*Lister, error) { - return p.lister(ctx, repo, p.o.pageSize) -} - -func (p *Puller) lister(ctx context.Context, repo name.Repository, pageSize int) (*Lister, error) { - f, err := p.fetcher(ctx, repo) - if err != nil { - return nil, err - } - page, err := f.listPage(ctx, repo, "", pageSize) - if err != nil { - return nil, err - } - return &Lister{ - f: f, - repo: repo, - pageSize: pageSize, - page: page, - err: err, - }, nil -} - -// Catalog lists repos in a registry and handles pagination, returning the full list of repos. -func (p *Puller) Catalog(ctx context.Context, reg name.Registry) ([]string, error) { - return p.catalog(ctx, reg, p.o.pageSize) -} - -func (p *Puller) catalog(ctx context.Context, reg name.Registry, pageSize int) ([]string, error) { - catalogger, err := p.catalogger(ctx, reg, pageSize) - if err != nil { - return nil, err - } - repoList := []string{} - for catalogger.HasNext() { - repos, err := catalogger.Next(ctx) - if err != nil { - return nil, err - } - repoList = append(repoList, repos.Repos...) - } - return repoList, nil -} - -// Catalogger lists repos in a registry and returns a Catalogger for paginating through the results. -func (p *Puller) Catalogger(ctx context.Context, reg name.Registry) (*Catalogger, error) { - return p.catalogger(ctx, reg, p.o.pageSize) -} - -func (p *Puller) catalogger(ctx context.Context, reg name.Registry, pageSize int) (*Catalogger, error) { - f, err := p.fetcher(ctx, reg) - if err != nil { - return nil, err - } - page, err := f.catalogPage(ctx, reg, "", pageSize) - if err != nil { - return nil, err - } - return &Catalogger{ - f: f, - reg: reg, - pageSize: pageSize, - page: page, - err: err, - }, nil -} - -func (p *Puller) referrers(ctx context.Context, d name.Digest, filter map[string]string) (v1.ImageIndex, error) { - f, err := p.fetcher(ctx, d.Context()) - if err != nil { - return nil, err - } - return f.fetchReferrers(ctx, filter, d) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go deleted file mode 100644 index 1c07bd4759..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go +++ /dev/null @@ -1,559 +0,0 @@ -// Copyright 2023 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "bytes" - "context" - "errors" - "fmt" - "net/http" - "net/url" - "sync" - - "github.com/google/go-containerregistry/pkg/logs" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/stream" - "github.com/google/go-containerregistry/pkg/v1/types" - "golang.org/x/sync/errgroup" -) - -type manifest interface { - Taggable - partial.Describable -} - -// key is either v1.Hash or v1.Layer (for stream.Layer) -type workers struct { - // map[v1.Hash|v1.Layer]*sync.Once - onces sync.Map - - // map[v1.Hash|v1.Layer]error - errors sync.Map -} - -func nop() error { - return nil -} - -func (w *workers) err(digest v1.Hash) error { - v, ok := w.errors.Load(digest) - if !ok || v == nil { - return nil - } - return v.(error) -} - -func (w *workers) Do(digest v1.Hash, f func() error) error { - // We don't care if it was loaded or not because the sync.Once will do it for us. - once, _ := w.onces.LoadOrStore(digest, &sync.Once{}) - - once.(*sync.Once).Do(func() { - w.errors.Store(digest, f()) - }) - - err := w.err(digest) - if err != nil { - // Allow this to be retried by another caller. - w.onces.Delete(digest) - } - return err -} - -func (w *workers) Stream(layer v1.Layer, f func() error) error { - // We don't care if it was loaded or not because the sync.Once will do it for us. - once, _ := w.onces.LoadOrStore(layer, &sync.Once{}) - - once.(*sync.Once).Do(func() { - w.errors.Store(layer, f()) - }) - - v, ok := w.errors.Load(layer) - if !ok || v == nil { - return nil - } - - return v.(error) -} - -type Pusher struct { - o *options - - // map[name.Repository]*repoWriter - writers sync.Map -} - -func NewPusher(options ...Option) (*Pusher, error) { - o, err := makeOptions(options...) 
- if err != nil { - return nil, err - } - - return newPusher(o), nil -} - -func newPusher(o *options) *Pusher { - if o.pusher != nil { - return o.pusher - } - return &Pusher{ - o: o, - } -} - -func (p *Pusher) writer(ctx context.Context, repo name.Repository, o *options) (*repoWriter, error) { - v, _ := p.writers.LoadOrStore(repo, &repoWriter{ - repo: repo, - o: o, - }) - rw := v.(*repoWriter) - return rw, rw.init(ctx) -} - -func (p *Pusher) Push(ctx context.Context, ref name.Reference, t Taggable) error { - w, err := p.writer(ctx, ref.Context(), p.o) - if err != nil { - return err - } - return w.writeManifest(ctx, ref, t) -} - -func (p *Pusher) Upload(ctx context.Context, repo name.Repository, l v1.Layer) error { - w, err := p.writer(ctx, repo, p.o) - if err != nil { - return err - } - return w.writeLayer(ctx, l) -} - -func (p *Pusher) Delete(ctx context.Context, ref name.Reference) error { - w, err := p.writer(ctx, ref.Context(), p.o) - if err != nil { - return err - } - - u := url.URL{ - Scheme: ref.Context().Registry.Scheme(), - Host: ref.Context().RegistryStr(), - Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()), - } - - req, err := http.NewRequest(http.MethodDelete, u.String(), nil) - if err != nil { - return err - } - - resp, err := w.w.client.Do(req.WithContext(ctx)) - if err != nil { - return err - } - defer resp.Body.Close() - - return transport.CheckError(resp, http.StatusOK, http.StatusAccepted) - - // TODO(jason): If the manifest had a `subject`, and if the registry - // doesn't support Referrers, update the index pointed to by the - // subject's fallback tag to remove the descriptor for this manifest. -} - -type repoWriter struct { - repo name.Repository - o *options - once sync.Once - - w *writer - err error - - work *workers -} - -// this will run once per repoWriter instance -func (rw *repoWriter) init(ctx context.Context) error { - rw.once.Do(func() { - rw.work = &workers{} - rw.w, rw.err = makeWriter(ctx, rw.repo, nil, rw.o) - }) - return rw.err -} - -func (rw *repoWriter) writeDeps(ctx context.Context, m manifest) error { - if img, ok := m.(v1.Image); ok { - return rw.writeLayers(ctx, img) - } - - if idx, ok := m.(v1.ImageIndex); ok { - return rw.writeChildren(ctx, idx) - } - - // This has no deps, not an error (e.g. something you want to just PUT). - return nil -} - -type describable struct { - desc v1.Descriptor -} - -func (d describable) Digest() (v1.Hash, error) { - return d.desc.Digest, nil -} - -func (d describable) Size() (int64, error) { - return d.desc.Size, nil -} - -func (d describable) MediaType() (types.MediaType, error) { - return d.desc.MediaType, nil -} - -type tagManifest struct { - Taggable - partial.Describable -} - -func taggableToManifest(t Taggable) (manifest, error) { - if m, ok := t.(manifest); ok { - return m, nil - } - - if d, ok := t.(*Descriptor); ok { - if d.MediaType.IsIndex() { - return d.ImageIndex() - } - - if d.MediaType.IsImage() { - return d.Image() - } - - if d.MediaType.IsSchema1() { - return d.Schema1() - } - - return tagManifest{t, describable{d.toDesc()}}, nil - } - - desc := v1.Descriptor{ - // A reasonable default if Taggable doesn't implement MediaType. 
- MediaType: types.DockerManifestSchema2, - } - - b, err := t.RawManifest() - if err != nil { - return nil, err - } - - if wmt, ok := t.(withMediaType); ok { - desc.MediaType, err = wmt.MediaType() - if err != nil { - return nil, err - } - } - - desc.Digest, desc.Size, err = v1.SHA256(bytes.NewReader(b)) - if err != nil { - return nil, err - } - - return tagManifest{t, describable{desc}}, nil -} - -func (rw *repoWriter) writeManifest(ctx context.Context, ref name.Reference, t Taggable) error { - m, err := taggableToManifest(t) - if err != nil { - return err - } - - needDeps := true - - digest, err := m.Digest() - if errors.Is(err, stream.ErrNotComputed) { - if err := rw.writeDeps(ctx, m); err != nil { - return err - } - - needDeps = false - - digest, err = m.Digest() - if err != nil { - return err - } - } else if err != nil { - return err - } - - // This may be a lazy child where we have no ref until digest is computed. - if ref == nil { - ref = rw.repo.Digest(digest.String()) - } - - // For tags, we want to do this check outside of our Work.Do closure because - // we don't want to dedupe based on the manifest digest. - _, byTag := ref.(name.Tag) - if byTag { - if exists, err := rw.manifestExists(ctx, ref, t); err != nil { - return err - } else if exists { - return nil - } - } - - // The following work.Do will get deduped by digest, so it won't happen unless - // this tag happens to be the first commitManifest to run for that digest. - needPut := byTag - - if err := rw.work.Do(digest, func() error { - if !byTag { - if exists, err := rw.manifestExists(ctx, ref, t); err != nil { - return err - } else if exists { - return nil - } - } - - if needDeps { - if err := rw.writeDeps(ctx, m); err != nil { - return err - } - } - - needPut = false - return rw.commitManifest(ctx, ref, m) - }); err != nil { - return err - } - - if !needPut { - return nil - } - - // Only runs for tags that got deduped by digest. - return rw.commitManifest(ctx, ref, m) -} - -func (rw *repoWriter) writeChildren(ctx context.Context, idx v1.ImageIndex) error { - children, err := partial.Manifests(idx) - if err != nil { - return err - } - - g, ctx := errgroup.WithContext(ctx) - g.SetLimit(rw.o.jobs) - - for _, child := range children { - child := child - if err := rw.writeChild(ctx, child, g); err != nil { - return err - } - } - - return g.Wait() -} - -func (rw *repoWriter) writeChild(ctx context.Context, child partial.Describable, g *errgroup.Group) error { - switch child := child.(type) { - case v1.ImageIndex: - // For recursive index, we want to do a depth-first launching of goroutines - // to avoid deadlocking. - // - // Note that this is rare, so the impact of this should be really small. - return rw.writeManifest(ctx, nil, child) - case v1.Image: - g.Go(func() error { - return rw.writeManifest(ctx, nil, child) - }) - case v1.Layer: - g.Go(func() error { - return rw.writeLayer(ctx, child) - }) - default: - // This can't happen. - return fmt.Errorf("encountered unknown child: %T", child) - } - return nil -} - -// TODO: Consider caching some representation of the tags/digests in the destination -// repository as a hint to avoid this optimistic check in cases where we will most -// likely have to do a PUT anyway, e.g. if we are overwriting a tag we just wrote. 
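A hedged sketch of driving this machinery from the outside: one Pusher pushing the same throwaway image to two tags of a hypothetical repository, so the second Push benefits from the repoWriter's manifest and layer dedupe:

```go
package main

import (
	"context"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/random"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	pusher, err := remote.NewPusher()
	if err != nil {
		log.Fatal(err)
	}

	img, err := random.Image(1024, 1) // throwaway test image
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	// Both tags land in one repository, so the second Push reuses the
	// repoWriter (and its completed-work map) from the first.
	for _, tag := range []string{"v1", "latest"} {
		ref, err := name.ParseReference("registry.example.com/app:" + tag) // hypothetical
		if err != nil {
			log.Fatal(err)
		}
		if err := pusher.Push(ctx, ref, img); err != nil {
			log.Fatal(err)
		}
	}
}
```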
-func (rw *repoWriter) manifestExists(ctx context.Context, ref name.Reference, t Taggable) (bool, error) { - f := &fetcher{ - target: ref.Context(), - client: rw.w.client, - } - - m, err := taggableToManifest(t) - if err != nil { - return false, err - } - - digest, err := m.Digest() - if err != nil { - // Possibly due to streaming layers. - return false, nil - } - got, err := f.headManifest(ctx, ref, allManifestMediaTypes) - if err != nil { - var terr *transport.Error - if errors.As(err, &terr) { - if terr.StatusCode == http.StatusNotFound { - return false, nil - } - - // We treat a 403 here as non-fatal because this existence check is an optimization and - // some registries will return a 403 instead of a 404 in certain situations. - // E.g. https://jfrog.atlassian.net/browse/RTFACT-13797 - if terr.StatusCode == http.StatusForbidden { - logs.Debug.Printf("manifestExists unexpected 403: %v", err) - return false, nil - } - } - - return false, err - } - - if digest != got.Digest { - // Mark that we saw this digest in the registry so we don't have to check it again. - rw.work.Do(got.Digest, nop) - - return false, nil - } - - if tag, ok := ref.(name.Tag); ok { - logs.Progress.Printf("existing manifest: %s@%s", tag.Identifier(), got.Digest) - } else { - logs.Progress.Print("existing manifest: ", got.Digest) - } - - return true, nil -} - -func (rw *repoWriter) commitManifest(ctx context.Context, ref name.Reference, m manifest) error { - if rw.o.progress != nil { - size, err := m.Size() - if err != nil { - return err - } - rw.o.progress.total(size) - } - - return rw.w.commitManifest(ctx, m, ref) -} - -func (rw *repoWriter) writeLayers(pctx context.Context, img v1.Image) error { - ls, err := img.Layers() - if err != nil { - return err - } - - g, ctx := errgroup.WithContext(pctx) - g.SetLimit(rw.o.jobs) - - for _, l := range ls { - l := l - - g.Go(func() error { - return rw.writeLayer(ctx, l) - }) - } - - mt, err := img.MediaType() - if err != nil { - return err - } - - if mt.IsSchema1() { - return g.Wait() - } - - cl, err := partial.ConfigLayer(img) - if errors.Is(err, stream.ErrNotComputed) { - if err := g.Wait(); err != nil { - return err - } - - cl, err := partial.ConfigLayer(img) - if err != nil { - return err - } - - return rw.writeLayer(pctx, cl) - } else if err != nil { - return err - } - - g.Go(func() error { - return rw.writeLayer(ctx, cl) - }) - - return g.Wait() -} - -func (rw *repoWriter) writeLayer(ctx context.Context, l v1.Layer) error { - // Skip any non-distributable things. - mt, err := l.MediaType() - if err != nil { - return err - } - if !mt.IsDistributable() && !rw.o.allowNondistributableArtifacts { - return nil - } - - digest, err := l.Digest() - if err != nil { - if errors.Is(err, stream.ErrNotComputed) { - return rw.lazyWriteLayer(ctx, l) - } - return err - } - - return rw.work.Do(digest, func() error { - if rw.o.progress != nil { - size, err := l.Size() - if err != nil { - return err - } - rw.o.progress.total(size) - } - return rw.w.uploadOne(ctx, l) - }) -} - -func (rw *repoWriter) lazyWriteLayer(ctx context.Context, l v1.Layer) error { - return rw.work.Stream(l, func() error { - if err := rw.w.uploadOne(ctx, l); err != nil { - return err - } - - // Mark this upload completed. 
- digest, err := l.Digest() - if err != nil { - return err - } - - rw.work.Do(digest, nop) - - if rw.o.progress != nil { - size, err := l.Size() - if err != nil { - return err - } - rw.o.progress.total(size) - } - - return nil - }) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go deleted file mode 100644 index e30ca57ed8..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2023 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "bytes" - "context" - "errors" - "io" - "net/http" - "strings" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/empty" - "github.com/google/go-containerregistry/pkg/v1/mutate" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Referrers returns a list of descriptors that refer to the given manifest digest. -// -// The subject manifest doesn't have to exist in the registry for there to be descriptors that refer to it. -func Referrers(d name.Digest, options ...Option) (v1.ImageIndex, error) { - o, err := makeOptions(options...) - if err != nil { - return nil, err - } - return newPuller(o).referrers(o.context, d, o.filter) -} - -// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#referrers-tag-schema -func fallbackTag(d name.Digest) name.Tag { - return d.Context().Tag(strings.Replace(d.DigestStr(), ":", "-", 1)) -} - -func (f *fetcher) fetchReferrers(ctx context.Context, filter map[string]string, d name.Digest) (v1.ImageIndex, error) { - // Check the Referrers API endpoint first. - u := f.url("referrers", d.DigestStr()) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - req.Header.Set("Accept", string(types.OCIImageIndex)) - - resp, err := f.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest); err != nil { - return nil, err - } - - var b []byte - if resp.StatusCode == http.StatusOK { - b, err = io.ReadAll(resp.Body) - if err != nil { - return nil, err - } - } else { - // The registry doesn't support the Referrers API endpoint, so we'll use the fallback tag scheme. - b, _, err = f.fetchManifest(ctx, fallbackTag(d), []types.MediaType{types.OCIImageIndex}) - var terr *transport.Error - if errors.As(err, &terr) && terr.StatusCode == http.StatusNotFound { - // Not found just means there are no attachments yet. Start with an empty manifest. 
- return empty.Index, nil - } else if err != nil { - return nil, err - } - } - - h, sz, err := v1.SHA256(bytes.NewReader(b)) - if err != nil { - return nil, err - } - idx := &remoteIndex{ - fetcher: *f, - ctx: ctx, - manifest: b, - mediaType: types.OCIImageIndex, - descriptor: &v1.Descriptor{ - Digest: h, - MediaType: types.OCIImageIndex, - Size: sz, - }, - } - return filterReferrersResponse(filter, idx), nil -} - -// If filter applied, filter out by artifactType. -// See https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers -func filterReferrersResponse(filter map[string]string, in v1.ImageIndex) v1.ImageIndex { - if filter == nil { - return in - } - v, ok := filter["artifactType"] - if !ok { - return in - } - return mutate.RemoveManifests(in, func(desc v1.Descriptor) bool { - return desc.ArtifactType != v - }) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/schema1.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/schema1.go deleted file mode 100644 index 4bc1c4c457..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/schema1.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2023 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
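The fallback-tag handling above is invisible to callers; remote.Referrers takes either path depending on registry support. A sketch of listing referrers filtered by artifact type (the digest placeholder and the artifactType value are both hypothetical):

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Hypothetical subject; the digest is a placeholder.
	d, err := name.NewDigest("gcr.io/example/app@sha256:" + strings.Repeat("a", 64))
	if err != nil {
		log.Fatal(err)
	}

	// The filter is applied client-side (filterReferrersResponse) when the
	// registry ignores the query parameter.
	idx, err := remote.Referrers(d, remote.WithFilter("artifactType", "application/vnd.example.sbom"))
	if err != nil {
		log.Fatal(err)
	}

	im, err := idx.IndexManifest()
	if err != nil {
		log.Fatal(err)
	}
	for _, desc := range im.Manifests {
		fmt.Println(desc.Digest, desc.ArtifactType)
	}
}
```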
- -package remote - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -type schema1 struct { - ref name.Reference - ctx context.Context - fetcher fetcher - manifest []byte - mediaType types.MediaType - descriptor *v1.Descriptor -} - -func (s *schema1) Layers() ([]v1.Layer, error) { - m := schema1Manifest{} - if err := json.NewDecoder(bytes.NewReader(s.manifest)).Decode(&m); err != nil { - return nil, err - } - - layers := []v1.Layer{} - for i := len(m.FSLayers) - 1; i >= 0; i-- { - fsl := m.FSLayers[i] - - h, err := v1.NewHash(fsl.BlobSum) - if err != nil { - return nil, err - } - l, err := s.LayerByDigest(h) - if err != nil { - return nil, err - } - layers = append(layers, l) - } - - return layers, nil -} - -func (s *schema1) MediaType() (types.MediaType, error) { - return s.mediaType, nil -} - -func (s *schema1) Size() (int64, error) { - return s.descriptor.Size, nil -} - -func (s *schema1) ConfigName() (v1.Hash, error) { - return partial.ConfigName(s) -} - -func (s *schema1) ConfigFile() (*v1.ConfigFile, error) { - return nil, newErrSchema1(s.mediaType) -} - -func (s *schema1) RawConfigFile() ([]byte, error) { - return []byte("{}"), nil -} - -func (s *schema1) Digest() (v1.Hash, error) { - return s.descriptor.Digest, nil -} - -func (s *schema1) Manifest() (*v1.Manifest, error) { - return nil, newErrSchema1(s.mediaType) -} - -func (s *schema1) RawManifest() ([]byte, error) { - return s.manifest, nil -} - -func (s *schema1) LayerByDigest(h v1.Hash) (v1.Layer, error) { - l, err := partial.CompressedToLayer(&remoteLayer{ - fetcher: s.fetcher, - ctx: s.ctx, - digest: h, - }) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: s.ref.Context().Digest(h.String()), - }, nil -} - -func (s *schema1) LayerByDiffID(v1.Hash) (v1.Layer, error) { - return nil, newErrSchema1(s.mediaType) -} - -type fslayer struct { - BlobSum string `json:"blobSum"` -} - -type schema1Manifest struct { - FSLayers []fslayer `json:"fsLayers"` -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md deleted file mode 100644 index bd4d957b0e..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md +++ /dev/null @@ -1,129 +0,0 @@ -# `transport` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport) - -The [distribution protocol](https://github.com/opencontainers/distribution-spec) is fairly simple, but correctly [implementing authentication](../../../authn/README.md) is **hard**. - -This package [implements](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#New) an [`http.RoundTripper`](https://godoc.org/net/http#RoundTripper) -that transparently performs: -* [Token -Authentication](https://docs.docker.com/registry/spec/auth/token/) and -* [OAuth2 -Authentication](https://docs.docker.com/registry/spec/auth/oauth/) - -for registry clients. 
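(A note while reading this README: the `transport.New` constructor used in the example further down is deprecated in favor of `transport.NewWithContext`, per `transport.go` later in this diff. A context-aware equivalent is sketched here, reusing the README's own public example repository; anonymous auth is an assumption that only holds for public repos.)

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func main() {
	repo, err := name.NewRepository("gcr.io/google-containers/pause")
	if err != nil {
		panic(err)
	}
	// Anonymous is enough for a public repository; resolve a keychain
	// credential instead for private ones.
	tr, err := transport.NewWithContext(
		context.Background(),
		repo.Registry,
		authn.Anonymous,
		http.DefaultTransport,
		[]string{repo.Scope(transport.PullScope)},
	)
	if err != nil {
		panic(err)
	}
	client := &http.Client{Transport: tr}
	resp, err := client.Get("https://gcr.io/v2/google-containers/pause/tags/list")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```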
-
-## Raison d'être
-
-> Why not just use the [`docker/distribution`](https://godoc.org/github.com/docker/distribution/registry/client/auth) client?
-
-Great question! Mostly, because I don't want to depend on [`prometheus/client_golang`](https://github.com/prometheus/client_golang).
-
-As a performance optimization, that client uses [a cache](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/client/repository.go#L173) to keep track of a mapping between blob digests and their [descriptors](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/blobs.go#L57-L86). Unfortunately, the cache [uses prometheus](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/storage/cache/cachedblobdescriptorstore.go#L44) to track hits and misses, so if you want to use that client you have to pull in all of prometheus, which is pretty large.
-
-![docker/distribution](../../../../images/docker.dot.svg)
-
-> Why does it matter if you depend on prometheus? Who cares?
-
-It's generally polite to your downstream to reduce the number of dependencies your package requires:
-
-* Downloading your package is faster, which helps our Australian friends and people on airplanes.
-* There is less code to compile, which speeds up builds and saves the planet from global warming.
-* You reduce the likelihood of inflicting dependency hell upon your consumers.
-* [Tim Hockin](https://twitter.com/thockin/status/958606077456654336) prefers it based on his experience working on Kubernetes, and he's a pretty smart guy.
-
-> Okay, what about [`containerd/containerd`](https://godoc.org/github.com/containerd/containerd/remotes/docker)?
-
-Similar reasons! That ends up pulling in grpc, protobuf, and logrus.
-
-![containerd/containerd](../../../../images/containerd.dot.svg)
-
-> Well... what about [`containers/image`](https://godoc.org/github.com/containers/image/docker)?
-
-That just uses the `docker/distribution` client... and more!
-
-![containers/image](../../../../images/containers.dot.svg)
-
-> Wow, what about this package?
-
-Of course, this package isn't perfect either. `transport` depends on `authn`,
-which in turn depends on docker's config file parsing and handling package,
-which you don't strictly need but almost certainly want if you're going to be
-interacting with a registry.
-
-![google/go-containerregistry](../../../../images/ggcr.dot.svg)
-
-*These graphs were generated by
-[`kisielk/godepgraph`](https://github.com/kisielk/godepgraph).*
-
-## Usage
-
-This is heavily used by the
-[`remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote)
-package, which implements higher-level image-centric functionality, but this
-package is useful if you want to interact directly with the registry to do
-something that `remote` doesn't support, e.g. [to handle schema 1
-images](https://github.com/google/go-containerregistry/pull/509).
-
-This package also includes some [error
-handling](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#errors)
-facilities in the form of
-[`CheckError`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#CheckError),
-which will parse the response body into a structured error for unexpected http
-status codes.
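To make the `CheckError` behavior concrete, here is a small self-contained sketch; the 429 response is synthetic (not a real registry reply), and the temporary/permanent classification comes from the error-code tables in `error.go` below.

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func main() {
	// A synthetic 429 with a spec-shaped error body, standing in for a
	// real registry response.
	resp := &http.Response{
		StatusCode: http.StatusTooManyRequests,
		Body: io.NopCloser(strings.NewReader(
			`{"errors":[{"code":"TOOMANYREQUESTS","message":"slow down"}]}`)),
	}
	if err := transport.CheckError(resp, http.StatusOK); err != nil {
		var terr *transport.Error
		if errors.As(err, &terr) {
			// TOOMANYREQUESTS is in the temporary set, so Temporary() is true
			// and callers (e.g. the retry transport) may retry.
			fmt.Printf("status=%d temporary=%t: %v\n", terr.StatusCode, terr.Temporary(), terr)
		}
	}
}
```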
- -Here's a "simple" program that writes the result of -[listing tags](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#tags) -for [`gcr.io/google-containers/pause`](https://gcr.io/google-containers/pause) -to stdout. - -```go -package main - -import ( - "io" - "net/http" - "os" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -func main() { - repo, err := name.NewRepository("gcr.io/google-containers/pause") - if err != nil { - panic(err) - } - - // Fetch credentials based on your docker config file, which is $HOME/.docker/config.json or $DOCKER_CONFIG. - auth, err := authn.DefaultKeychain.Resolve(repo.Registry) - if err != nil { - panic(err) - } - - // Construct an http.Client that is authorized to pull from gcr.io/google-containers/pause. - scopes := []string{repo.Scope(transport.PullScope)} - t, err := transport.New(repo.Registry, auth, http.DefaultTransport, scopes) - if err != nil { - panic(err) - } - client := &http.Client{Transport: t} - - // Make the actual request. - resp, err := client.Get("https://gcr.io/v2/google-containers/pause/tags/list") - if err != nil { - panic(err) - } - - // Assert that we get a 200, otherwise attempt to parse body as a structured error. - if err := transport.CheckError(resp, http.StatusOK); err != nil { - panic(err) - } - - // Write the response to stdout. - if _, err := io.Copy(os.Stdout, resp.Body); err != nil { - panic(err) - } -} -``` diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go deleted file mode 100644 index fdb362b762..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "encoding/base64" - "fmt" - "net/http" - - "github.com/google/go-containerregistry/pkg/authn" -) - -type basicTransport struct { - inner http.RoundTripper - auth authn.Authenticator - target string -} - -var _ http.RoundTripper = (*basicTransport)(nil) - -// RoundTrip implements http.RoundTripper -func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) { - if bt.auth != authn.Anonymous { - auth, err := bt.auth.Authorization() - if err != nil { - return nil, err - } - - // http.Client handles redirects at a layer above the http.RoundTripper - // abstraction, so to avoid forwarding Authorization headers to places - // we are redirected, only set it when the authorization header matches - // the host with which we are interacting. - // In case of redirect http.Client can use an empty Host, check URL too. 
- if in.Host == bt.target || in.URL.Host == bt.target { - if bearer := auth.RegistryToken; bearer != "" { - hdr := fmt.Sprintf("Bearer %s", bearer) - in.Header.Set("Authorization", hdr) - } else if user, pass := auth.Username, auth.Password; user != "" && pass != "" { - delimited := fmt.Sprintf("%s:%s", user, pass) - encoded := base64.StdEncoding.EncodeToString([]byte(delimited)) - hdr := fmt.Sprintf("Basic %s", encoded) - in.Header.Set("Authorization", hdr) - } else if token := auth.Auth; token != "" { - hdr := fmt.Sprintf("Basic %s", token) - in.Header.Set("Authorization", hdr) - } - } - } - return bt.inner.RoundTrip(in) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go deleted file mode 100644 index cb15674969..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "strings" - - authchallenge "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/google/go-containerregistry/internal/redact" - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/logs" - "github.com/google/go-containerregistry/pkg/name" -) - -type Token struct { - Token string `json:"token"` - AccessToken string `json:"access_token,omitempty"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` -} - -// Exchange requests a registry Token with the given scopes. -func Exchange(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string, pr *Challenge) (*Token, error) { - if strings.ToLower(pr.Scheme) != "bearer" { - // TODO: Pretend token for basic? - return nil, fmt.Errorf("challenge scheme %q is not bearer", pr.Scheme) - } - bt, err := fromChallenge(reg, auth, t, pr, scopes...) - if err != nil { - return nil, err - } - authcfg, err := auth.Authorization() - if err != nil { - return nil, err - } - tok, err := bt.Refresh(ctx, authcfg) - if err != nil { - return nil, err - } - return tok, nil -} - -// FromToken returns a transport given a Challenge + Token. 
-func FromToken(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, pr *Challenge, tok *Token) (http.RoundTripper, error) { - if strings.ToLower(pr.Scheme) != "bearer" { - return &Wrapper{&basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}}, nil - } - bt, err := fromChallenge(reg, auth, t, pr) - if err != nil { - return nil, err - } - if tok.Token != "" { - bt.bearer.RegistryToken = tok.Token - } - return &Wrapper{bt}, nil -} - -func fromChallenge(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, pr *Challenge, scopes ...string) (*bearerTransport, error) { - // We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth. - realm, ok := pr.Parameters["realm"] - if !ok { - return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.Parameters) - } - service := pr.Parameters["service"] - scheme := "https" - if pr.Insecure { - scheme = "http" - } - return &bearerTransport{ - inner: t, - basic: auth, - realm: realm, - registry: reg, - service: service, - scopes: scopes, - scheme: scheme, - }, nil -} - -type bearerTransport struct { - // Wrapped by bearerTransport. - inner http.RoundTripper - // Basic credentials that we exchange for bearer tokens. - basic authn.Authenticator - // Holds the bearer response from the token service. - bearer authn.AuthConfig - // Registry to which we send bearer tokens. - registry name.Registry - // See https://tools.ietf.org/html/rfc6750#section-3 - realm string - // See https://docs.docker.com/registry/spec/auth/token/ - service string - scopes []string - // Scheme we should use, determined by ping response. - scheme string -} - -var _ http.RoundTripper = (*bearerTransport)(nil) - -var portMap = map[string]string{ - "http": "80", - "https": "443", -} - -func stringSet(ss []string) map[string]struct{} { - set := make(map[string]struct{}) - for _, s := range ss { - set[s] = struct{}{} - } - return set -} - -// RoundTrip implements http.RoundTripper -func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { - sendRequest := func() (*http.Response, error) { - // http.Client handles redirects at a layer above the http.RoundTripper - // abstraction, so to avoid forwarding Authorization headers to places - // we are redirected, only set it when the authorization header matches - // the registry with which we are interacting. - // In case of redirect http.Client can use an empty Host, check URL too. - if matchesHost(bt.registry.RegistryStr(), in, bt.scheme) { - hdr := fmt.Sprintf("Bearer %s", bt.bearer.RegistryToken) - in.Header.Set("Authorization", hdr) - } - return bt.inner.RoundTrip(in) - } - - res, err := sendRequest() - if err != nil { - return nil, err - } - - // If we hit a WWW-Authenticate challenge, it might be due to expired tokens or insufficient scope. - if challenges := authchallenge.ResponseChallenges(res); len(challenges) != 0 { - // close out old response, since we will not return it. - res.Body.Close() - - newScopes := []string{} - for _, wac := range challenges { - // TODO(jonjohnsonjr): Should we also update "realm" or "service"? - if want, ok := wac.Parameters["scope"]; ok { - // Add any scopes that we don't already request. - got := stringSet(bt.scopes) - if _, ok := got[want]; !ok { - newScopes = append(newScopes, want) - } - } - } - - // Some registries seem to only look at the first scope parameter during a token exchange. 
-		// If a request fails because it's missing a scope, we should put those at the beginning,
-		// otherwise the registry might just ignore it :/
-		newScopes = append(newScopes, bt.scopes...)
-		bt.scopes = newScopes
-
-		// TODO(jonjohnsonjr): Teach transport.Error about "error" and "error_description" from challenge.
-
-		// Retry the request to attempt to get a valid token.
-		if err = bt.refresh(in.Context()); err != nil {
-			return nil, err
-		}
-		return sendRequest()
-	}
-
-	return res, err
-}
-
-// It's unclear which authentication flow to use based purely on the protocol,
-// so we rely on heuristics and fallbacks to support as many registries as possible.
-// The basic token exchange is attempted first, falling back to the oauth flow.
-// If the IdentityToken is set, this indicates that we should start with the oauth flow.
-func (bt *bearerTransport) refresh(ctx context.Context) error {
-	auth, err := bt.basic.Authorization()
-	if err != nil {
-		return err
-	}
-
-	if auth.RegistryToken != "" {
-		bt.bearer.RegistryToken = auth.RegistryToken
-		return nil
-	}
-
-	response, err := bt.Refresh(ctx, auth)
-	if err != nil {
-		return err
-	}
-
-	// Some registries set access_token instead of token. See #54.
-	if response.AccessToken != "" {
-		response.Token = response.AccessToken
-	}
-
-	// Find a token to turn into a Bearer authenticator.
-	if response.Token != "" {
-		bt.bearer.RegistryToken = response.Token
-	}
-
-	// If we obtained a refresh token from the oauth flow, use that for refresh() now.
-	if response.RefreshToken != "" {
-		bt.basic = authn.FromConfig(authn.AuthConfig{
-			IdentityToken: response.RefreshToken,
-		})
-	}
-
-	return nil
-}
-
-func (bt *bearerTransport) Refresh(ctx context.Context, auth *authn.AuthConfig) (*Token, error) {
-	var (
-		content []byte
-		err     error
-	)
-	if auth.IdentityToken != "" {
-		// If the secret being stored is an identity token,
-		// the Username should be set to <token>, which indicates
-		// we are using an oauth flow.
-		content, err = bt.refreshOauth(ctx)
-		var terr *Error
-		if errors.As(err, &terr) && terr.StatusCode == http.StatusNotFound {
-			// Note: Not all token servers implement oauth2.
-			// If the request to the endpoint returns 404 using the HTTP POST method,
-			// refer to Token Documentation for using the HTTP GET method supported by all token servers.
- content, err = bt.refreshBasic(ctx) - } - } else { - content, err = bt.refreshBasic(ctx) - } - if err != nil { - return nil, err - } - - var response Token - if err := json.Unmarshal(content, &response); err != nil { - return nil, err - } - - if response.Token == "" && response.AccessToken == "" { - return &response, fmt.Errorf("no token in bearer response:\n%s", content) - } - - return &response, nil -} - -func matchesHost(host string, in *http.Request, scheme string) bool { - canonicalHeaderHost := canonicalAddress(in.Host, scheme) - canonicalURLHost := canonicalAddress(in.URL.Host, scheme) - canonicalRegistryHost := canonicalAddress(host, scheme) - return canonicalHeaderHost == canonicalRegistryHost || canonicalURLHost == canonicalRegistryHost -} - -func canonicalAddress(host, scheme string) (address string) { - // The host may be any one of: - // - hostname - // - hostname:port - // - ipv4 - // - ipv4:port - // - ipv6 - // - [ipv6]:port - // As net.SplitHostPort returns an error if the host does not contain a port, we should only attempt - // to call it when we know that the address contains a port - if strings.Count(host, ":") == 1 || (strings.Count(host, ":") >= 2 && strings.Contains(host, "]:")) { - hostname, port, err := net.SplitHostPort(host) - if err != nil { - return host - } - if port == "" { - port = portMap[scheme] - } - - return net.JoinHostPort(hostname, port) - } - - return net.JoinHostPort(host, portMap[scheme]) -} - -// https://docs.docker.com/registry/spec/auth/oauth/ -func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) { - auth, err := bt.basic.Authorization() - if err != nil { - return nil, err - } - - u, err := url.Parse(bt.realm) - if err != nil { - return nil, err - } - - v := url.Values{} - v.Set("scope", strings.Join(bt.scopes, " ")) - if bt.service != "" { - v.Set("service", bt.service) - } - v.Set("client_id", defaultUserAgent) - if auth.IdentityToken != "" { - v.Set("grant_type", "refresh_token") - v.Set("refresh_token", auth.IdentityToken) - } else if auth.Username != "" && auth.Password != "" { - // TODO(#629): This is unreachable. - v.Set("grant_type", "password") - v.Set("username", auth.Username) - v.Set("password", auth.Password) - v.Set("access_type", "offline") - } - - client := http.Client{Transport: bt.inner} - req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - // We don't want to log credentials. 
- ctx = redact.NewContext(ctx, "oauth token response contains credentials") - - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := CheckError(resp, http.StatusOK); err != nil { - if bt.basic == authn.Anonymous { - logs.Warn.Printf("No matching credentials were found for %q", bt.registry) - } - return nil, err - } - - return io.ReadAll(resp.Body) -} - -// https://docs.docker.com/registry/spec/auth/token/ -func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) { - u, err := url.Parse(bt.realm) - if err != nil { - return nil, err - } - b := &basicTransport{ - inner: bt.inner, - auth: bt.basic, - target: u.Host, - } - client := http.Client{Transport: b} - - v := u.Query() - v["scope"] = bt.scopes - v.Set("service", bt.service) - u.RawQuery = v.Encode() - - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - - // We don't want to log credentials. - ctx = redact.NewContext(ctx, "basic token response contains credentials") - - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := CheckError(resp, http.StatusOK); err != nil { - if bt.basic == authn.Anonymous { - logs.Warn.Printf("No matching credentials were found for %q", bt.registry) - } - return nil, err - } - - return io.ReadAll(resp.Body) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go deleted file mode 100644 index ff7025b5c0..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package transport provides facilities for setting up an authenticated -// http.RoundTripper given an Authenticator and base RoundTripper. See -// transport.New for more information. -package transport diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go deleted file mode 100644 index 482a4adee7..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - - "github.com/google/go-containerregistry/internal/redact" -) - -// Error implements error to support the following error specification: -// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors -type Error struct { - Errors []Diagnostic `json:"errors,omitempty"` - // The http status code returned. - StatusCode int - // The request that failed. - Request *http.Request - // The raw body if we couldn't understand it. - rawBody string - - // Bit of a hack to make it easier to force a retry. - temporary bool -} - -// Check that Error implements error -var _ error = (*Error)(nil) - -// Error implements error -func (e *Error) Error() string { - prefix := "" - if e.Request != nil { - prefix = fmt.Sprintf("%s %s: ", e.Request.Method, redact.URL(e.Request.URL)) - } - return prefix + e.responseErr() -} - -func (e *Error) responseErr() string { - switch len(e.Errors) { - case 0: - if len(e.rawBody) == 0 { - if e.Request != nil && e.Request.Method == http.MethodHead { - return fmt.Sprintf("unexpected status code %d %s (HEAD responses have no body, use GET for details)", e.StatusCode, http.StatusText(e.StatusCode)) - } - return fmt.Sprintf("unexpected status code %d %s", e.StatusCode, http.StatusText(e.StatusCode)) - } - return fmt.Sprintf("unexpected status code %d %s: %s", e.StatusCode, http.StatusText(e.StatusCode), e.rawBody) - case 1: - return e.Errors[0].String() - default: - var errors []string - for _, d := range e.Errors { - errors = append(errors, d.String()) - } - return fmt.Sprintf("multiple errors returned: %s", - strings.Join(errors, "; ")) - } -} - -// Temporary returns whether the request that preceded the error is temporary. -func (e *Error) Temporary() bool { - if e.temporary { - return true - } - - if len(e.Errors) == 0 { - _, ok := temporaryStatusCodes[e.StatusCode] - return ok - } - for _, d := range e.Errors { - if _, ok := temporaryErrorCodes[d.Code]; !ok { - return false - } - } - return true -} - -// Diagnostic represents a single error returned by a Docker registry interaction. -type Diagnostic struct { - Code ErrorCode `json:"code"` - Message string `json:"message,omitempty"` - Detail any `json:"detail,omitempty"` -} - -// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail] -func (d Diagnostic) String() string { - msg := fmt.Sprintf("%s: %s", d.Code, d.Message) - if d.Detail != nil { - msg = fmt.Sprintf("%s; %v", msg, d.Detail) - } - return msg -} - -// ErrorCode is an enumeration of supported error codes. 
-type ErrorCode string - -// The set of error conditions a registry may return: -// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2 -const ( - BlobUnknownErrorCode ErrorCode = "BLOB_UNKNOWN" - BlobUploadInvalidErrorCode ErrorCode = "BLOB_UPLOAD_INVALID" - BlobUploadUnknownErrorCode ErrorCode = "BLOB_UPLOAD_UNKNOWN" - DigestInvalidErrorCode ErrorCode = "DIGEST_INVALID" - ManifestBlobUnknownErrorCode ErrorCode = "MANIFEST_BLOB_UNKNOWN" - ManifestInvalidErrorCode ErrorCode = "MANIFEST_INVALID" - ManifestUnknownErrorCode ErrorCode = "MANIFEST_UNKNOWN" - ManifestUnverifiedErrorCode ErrorCode = "MANIFEST_UNVERIFIED" - NameInvalidErrorCode ErrorCode = "NAME_INVALID" - NameUnknownErrorCode ErrorCode = "NAME_UNKNOWN" - SizeInvalidErrorCode ErrorCode = "SIZE_INVALID" - TagInvalidErrorCode ErrorCode = "TAG_INVALID" - UnauthorizedErrorCode ErrorCode = "UNAUTHORIZED" - DeniedErrorCode ErrorCode = "DENIED" - UnsupportedErrorCode ErrorCode = "UNSUPPORTED" - TooManyRequestsErrorCode ErrorCode = "TOOMANYREQUESTS" - UnknownErrorCode ErrorCode = "UNKNOWN" - - // This isn't defined by either docker or OCI spec, but is defined by docker/distribution: - // https://github.com/distribution/distribution/blob/6a977a5a754baa213041443f841705888107362a/registry/api/errcode/register.go#L60 - UnavailableErrorCode ErrorCode = "UNAVAILABLE" -) - -// TODO: Include other error types. -var temporaryErrorCodes = map[ErrorCode]struct{}{ - BlobUploadInvalidErrorCode: {}, - TooManyRequestsErrorCode: {}, - UnknownErrorCode: {}, - UnavailableErrorCode: {}, -} - -var temporaryStatusCodes = map[int]struct{}{ - http.StatusRequestTimeout: {}, - http.StatusInternalServerError: {}, - http.StatusBadGateway: {}, - http.StatusServiceUnavailable: {}, - http.StatusGatewayTimeout: {}, -} - -// CheckError returns a structured error if the response status is not in codes. -func CheckError(resp *http.Response, codes ...int) error { - for _, code := range codes { - if resp.StatusCode == code { - // This is one of the supported status codes. - return nil - } - } - - b, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - - return makeError(resp, b) -} - -func makeError(resp *http.Response, body []byte) *Error { - // https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors - structuredError := &Error{} - - // This can fail if e.g. the response body is not valid JSON. That's fine, - // we'll construct an appropriate error string from the body and status code. - _ = json.Unmarshal(body, structuredError) - - structuredError.rawBody = string(body) - structuredError.StatusCode = resp.StatusCode - structuredError.Request = resp.Request - - return structuredError -} - -func retryError(resp *http.Response) error { - b, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - - rerr := makeError(resp, b) - rerr.temporary = true - return rerr -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go deleted file mode 100644 index c341f844e6..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "fmt" - "net/http" - "net/http/httputil" - "time" - - "github.com/google/go-containerregistry/internal/redact" - "github.com/google/go-containerregistry/pkg/logs" -) - -type logTransport struct { - inner http.RoundTripper -} - -// NewLogger returns a transport that logs requests and responses to -// github.com/google/go-containerregistry/pkg/logs.Debug. -func NewLogger(inner http.RoundTripper) http.RoundTripper { - return &logTransport{inner} -} - -func (t *logTransport) RoundTrip(in *http.Request) (out *http.Response, err error) { - // Inspired by: github.com/motemen/go-loghttp - - // We redact token responses and binary blobs in response/request. - omitBody, reason := redact.FromContext(in.Context()) - if omitBody { - logs.Debug.Printf("--> %s %s [body redacted: %s]", in.Method, in.URL, reason) - } else { - logs.Debug.Printf("--> %s %s", in.Method, in.URL) - } - - // Save these headers so we can redact Authorization. - savedHeaders := in.Header.Clone() - if in.Header != nil && in.Header.Get("authorization") != "" { - in.Header.Set("authorization", "") - } - - b, err := httputil.DumpRequestOut(in, !omitBody) - if err == nil { - logs.Debug.Println(string(b)) - } else { - logs.Debug.Printf("Failed to dump request %s %s: %v", in.Method, in.URL, err) - } - - // Restore the non-redacted headers. - in.Header = savedHeaders - - start := time.Now() - out, err = t.inner.RoundTrip(in) - duration := time.Since(start) - if err != nil { - logs.Debug.Printf("<-- %v %s %s (%s)", err, in.Method, in.URL, duration) - } - if out != nil { - msg := fmt.Sprintf("<-- %d", out.StatusCode) - if out.Request != nil { - msg = fmt.Sprintf("%s %s", msg, out.Request.URL) - } - msg = fmt.Sprintf("%s (%s)", msg, duration) - - if omitBody { - msg = fmt.Sprintf("%s [body redacted: %s]", msg, reason) - } - - logs.Debug.Print(msg) - - b, err := httputil.DumpResponse(out, !omitBody) - if err == nil { - logs.Debug.Println(string(b)) - } else { - logs.Debug.Printf("Failed to dump response %s %s: %v", in.Method, in.URL, err) - } - } - return -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go deleted file mode 100644 index 799c7ea08b..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
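The `ping.go` deletion beginning here removes the `GET /v2/` probe that seeds the auth handshake, including the https-then-http "happy eyeballs" race for insecure registries. For reference, the exported `Ping` can be driven directly; a sketch (the registry and the shape of its challenge are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func main() {
	reg, err := name.NewRegistry("gcr.io")
	if err != nil {
		panic(err)
	}
	pr, err := transport.Ping(context.Background(), reg, http.DefaultTransport)
	if err != nil {
		panic(err)
	}
	// For gcr.io this is typically a Bearer challenge whose parameters carry
	// the token realm and service; a plain 200 would yield an empty scheme.
	fmt.Printf("scheme=%q insecure=%t parameters=%v\n", pr.Scheme, pr.Insecure, pr.Parameters)
}
```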
- -package transport - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "strings" - "time" - - authchallenge "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/google/go-containerregistry/pkg/logs" - "github.com/google/go-containerregistry/pkg/name" -) - -// 300ms is the default fallback period for go's DNS dialer but we could make this configurable. -var fallbackDelay = 300 * time.Millisecond - -type Challenge struct { - Scheme string - - // Following the challenge there are often key/value pairs - // e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz" - Parameters map[string]string - - // Whether we had to use http to complete the Ping. - Insecure bool -} - -// Ping does a GET /v2/ against the registry and returns the response. -func Ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*Challenge, error) { - // This first attempts to use "https" for every request, falling back to http - // if the registry matches our localhost heuristic or if it is intentionally - // set to insecure via name.NewInsecureRegistry. - schemes := []string{"https"} - if reg.Scheme() == "http" { - schemes = append(schemes, "http") - } - if len(schemes) == 1 { - return pingSingle(ctx, reg, t, schemes[0]) - } - return pingParallel(ctx, reg, t, schemes) -} - -func pingSingle(ctx context.Context, reg name.Registry, t http.RoundTripper, scheme string) (*Challenge, error) { - client := http.Client{Transport: t} - url := fmt.Sprintf("%s://%s/v2/", scheme, reg.RegistryStr()) - req, err := http.NewRequest(http.MethodGet, url, nil) - if err != nil { - return nil, err - } - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - return nil, err - } - defer func() { - // By draining the body, make sure to reuse the connection made by - // the ping for the following access to the registry - io.Copy(io.Discard, resp.Body) - resp.Body.Close() - }() - - insecure := scheme == "http" - - switch resp.StatusCode { - case http.StatusOK: - // If we get a 200, then no authentication is needed. - return &Challenge{ - Insecure: insecure, - }, nil - case http.StatusUnauthorized: - if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 { - // If we hit more than one, let's try to find one that we know how to handle. - wac := pickFromMultipleChallenges(challenges) - return &Challenge{ - Scheme: wac.Scheme, - Parameters: wac.Parameters, - Insecure: insecure, - }, nil - } - // Otherwise, just return the challenge without parameters. - return &Challenge{ - Scheme: resp.Header.Get("WWW-Authenticate"), - Insecure: insecure, - }, nil - default: - return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized) - } -} - -// Based on the golang happy eyeballs dialParallel impl in net/dial.go. 
-func pingParallel(ctx context.Context, reg name.Registry, t http.RoundTripper, schemes []string) (*Challenge, error) { - returned := make(chan struct{}) - defer close(returned) - - type pingResult struct { - *Challenge - error - primary bool - done bool - } - - results := make(chan pingResult) - - startRacer := func(ctx context.Context, scheme string) { - pr, err := pingSingle(ctx, reg, t, scheme) - select { - case results <- pingResult{Challenge: pr, error: err, primary: scheme == "https", done: true}: - case <-returned: - if pr != nil { - logs.Debug.Printf("%s lost race", scheme) - } - } - } - - var primary, fallback pingResult - - primaryCtx, primaryCancel := context.WithCancel(ctx) - defer primaryCancel() - go startRacer(primaryCtx, schemes[0]) - - fallbackTimer := time.NewTimer(fallbackDelay) - defer fallbackTimer.Stop() - - for { - select { - case <-fallbackTimer.C: - fallbackCtx, fallbackCancel := context.WithCancel(ctx) - defer fallbackCancel() - go startRacer(fallbackCtx, schemes[1]) - - case res := <-results: - if res.error == nil { - return res.Challenge, nil - } - if res.primary { - primary = res - } else { - fallback = res - } - if primary.done && fallback.done { - return nil, multierrs{primary.error, fallback.error} - } - if res.primary && fallbackTimer.Stop() { - // Primary failed and we haven't started the fallback, - // reset time to start fallback immediately. - fallbackTimer.Reset(0) - } - } - } -} - -func pickFromMultipleChallenges(challenges []authchallenge.Challenge) authchallenge.Challenge { - // It might happen there are multiple www-authenticate headers, e.g. `Negotiate` and `Basic`. - // Picking simply the first one could result eventually in `unrecognized challenge` error, - // that's why we're looping through the challenges in search for one that can be handled. - allowedSchemes := []string{"basic", "bearer"} - - for _, wac := range challenges { - currentScheme := strings.ToLower(wac.Scheme) - for _, allowed := range allowedSchemes { - if allowed == currentScheme { - return wac - } - } - } - - return challenges[0] -} - -type multierrs []error - -func (m multierrs) Error() string { - var b strings.Builder - hasWritten := false - for _, err := range m { - if hasWritten { - b.WriteString("; ") - } - hasWritten = true - b.WriteString(err.Error()) - } - return b.String() -} - -func (m multierrs) As(target any) bool { - for _, err := range m { - if errors.As(err, target) { - return true - } - } - return false -} - -func (m multierrs) Is(target error) bool { - for _, err := range m { - if errors.Is(err, target) { - return true - } - } - return false -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go deleted file mode 100644 index 093f55d02f..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net/http" - "time" - - "github.com/google/go-containerregistry/internal/retry" -) - -// Sleep for 0.1 then 0.3 seconds. This should cover networking blips. -var defaultBackoff = retry.Backoff{ - Duration: 100 * time.Millisecond, - Factor: 3.0, - Jitter: 0.1, - Steps: 3, -} - -var _ http.RoundTripper = (*retryTransport)(nil) - -// retryTransport wraps a RoundTripper and retries temporary network errors. -type retryTransport struct { - inner http.RoundTripper - backoff retry.Backoff - predicate retry.Predicate - codes []int -} - -// Option is a functional option for retryTransport. -type Option func(*options) - -type options struct { - backoff retry.Backoff - predicate retry.Predicate - codes []int -} - -// Backoff is an alias of retry.Backoff to expose this configuration option to consumers of this lib -type Backoff = retry.Backoff - -// WithRetryBackoff sets the backoff for retry operations. -func WithRetryBackoff(backoff Backoff) Option { - return func(o *options) { - o.backoff = backoff - } -} - -// WithRetryPredicate sets the predicate for retry operations. -func WithRetryPredicate(predicate func(error) bool) Option { - return func(o *options) { - o.predicate = predicate - } -} - -// WithRetryStatusCodes sets which http response codes will be retried. -func WithRetryStatusCodes(codes ...int) Option { - return func(o *options) { - o.codes = codes - } -} - -// NewRetry returns a transport that retries errors. -func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper { - o := &options{ - backoff: defaultBackoff, - predicate: retry.IsTemporary, - } - - for _, opt := range opts { - opt(o) - } - - return &retryTransport{ - inner: inner, - backoff: o.backoff, - predicate: o.predicate, - codes: o.codes, - } -} - -func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err error) { - roundtrip := func() error { - out, err = t.inner.RoundTrip(in) - if !retry.Ever(in.Context()) { - return nil - } - if out != nil { - for _, code := range t.codes { - if out.StatusCode == code { - return retryError(out) - } - } - } - return err - } - retry.Retry(roundtrip, t.predicate, t.backoff) - return -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go deleted file mode 100644 index 05844db136..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net/http" - - "github.com/google/go-containerregistry/pkg/name" -) - -type schemeTransport struct { - // Scheme we should use, determined by ping response. - scheme string - - // Registry we're talking to. 
- registry name.Registry - - // Wrapped by schemeTransport. - inner http.RoundTripper -} - -// RoundTrip implements http.RoundTripper -func (st *schemeTransport) RoundTrip(in *http.Request) (*http.Response, error) { - // When we ping() the registry, we determine whether to use http or https - // based on which scheme was successful. That is only valid for the - // registry server and not e.g. a separate token server or blob storage, - // so we should only override the scheme if the host is the registry. - if matchesHost(st.registry.String(), in, st.scheme) { - in.URL.Scheme = st.scheme - } - return st.inner.RoundTrip(in) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go deleted file mode 100644 index c3b56f7a41..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -// Scopes suitable to qualify each Repository -const ( - PullScope string = "pull" - PushScope string = "push,pull" - // For now DELETE is PUSH, which is the read/write ACL. - DeleteScope string = PushScope - CatalogScope string = "catalog" -) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go deleted file mode 100644 index bd539b44fb..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "context" - "net/http" - "strings" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" -) - -// New returns a new RoundTripper based on the provided RoundTripper that has been -// setup to authenticate with the remote registry "reg", in the capacity -// laid out by the specified scopes. -// -// Deprecated: Use NewWithContext. 
-func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
-	return NewWithContext(context.Background(), reg, auth, t, scopes)
-}
-
-// NewWithContext returns a new RoundTripper based on the provided RoundTripper that has been
-// set up to authenticate with the remote registry "reg", in the capacity
-// laid out by the specified scopes.
-// In case the RoundTripper is already of the type Wrapper it assumes
-// authentication was already done prior to this call, so it just returns
-// the provided RoundTripper without further action.
-func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
-	// When the transport provided is of the type Wrapper this function assumes that the caller already
-	// executed the necessary login and check.
-	switch t.(type) {
-	case *Wrapper:
-		return t, nil
-	}
-	// The handshake:
-	//  1. Use "t" to ping() the registry for the authentication challenge.
-	//
-	//  2a. If we get back a 200, then simply use "t".
-	//
-	//  2b. If we get back a 401 with a Basic challenge, then use a transport
-	//      that just attaches auth on each roundtrip.
-	//
-	//  2c. If we get back a 401 with a Bearer challenge, then use a transport
-	//      that attaches a bearer token to each request, and refreshes it on 401s.
-	//      Perform an initial refresh to seed the bearer token.
-
-	// First we ping the registry to determine the parameters of the authentication handshake
-	// (if one is even necessary).
-	pr, err := Ping(ctx, reg, t)
-	if err != nil {
-		return nil, err
-	}
-
-	// Wrap t with a useragent transport unless we already have one.
-	if _, ok := t.(*userAgentTransport); !ok {
-		t = NewUserAgent(t, "")
-	}
-
-	scheme := "https"
-	if pr.Insecure {
-		scheme = "http"
-	}
-
-	// Wrap t in a transport that selects the appropriate scheme based on the ping response.
-	t = &schemeTransport{
-		scheme:   scheme,
-		registry: reg,
-		inner:    t,
-	}
-
-	if strings.ToLower(pr.Scheme) != "bearer" {
-		return &Wrapper{&basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}}, nil
-	}
-
-	bt, err := fromChallenge(reg, auth, t, pr)
-	if err != nil {
-		return nil, err
-	}
-	bt.scopes = scopes
-
-	if err := bt.refresh(ctx); err != nil {
-		return nil, err
-	}
-	return &Wrapper{bt}, nil
-}
-
-// Wrapper results in *not* wrapping supplied transport with additional logic such as retries, useragent and debug logging
-// Consumers are opting into providing their own transport without any additional wrapping.
-type Wrapper struct {
-	inner http.RoundTripper
-}
-
-// RoundTrip delegates to the inner RoundTripper
-func (w *Wrapper) RoundTrip(in *http.Request) (*http.Response, error) {
-	return w.inner.RoundTrip(in)
-}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
deleted file mode 100644
index 74a9e71bdf..0000000000
--- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2019 Google LLC All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "fmt" - "net/http" - "runtime/debug" -) - -var ( - // Version can be set via: - // -ldflags="-X 'github.com/google/go-containerregistry/pkg/v1/remote/transport.Version=$TAG'" - Version string - - ggcrVersion = defaultUserAgent -) - -const ( - defaultUserAgent = "go-containerregistry" - moduleName = "github.com/google/go-containerregistry" -) - -type userAgentTransport struct { - inner http.RoundTripper - ua string -} - -func init() { - if v := version(); v != "" { - ggcrVersion = fmt.Sprintf("%s/%s", defaultUserAgent, v) - } -} - -func version() string { - if Version != "" { - // Version was set via ldflags, just return it. - return Version - } - - info, ok := debug.ReadBuildInfo() - if !ok { - return "" - } - - // Happens for crane and gcrane. - if info.Main.Path == moduleName { - return info.Main.Version - } - - // Anything else. - for _, dep := range info.Deps { - if dep.Path == moduleName { - return dep.Version - } - } - - return "" -} - -// NewUserAgent returns an http.Roundtripper that sets the user agent to -// The provided string plus additional go-containerregistry information, -// e.g. if provided "crane/v0.1.4" and this modules was built at v0.1.4: -// -// User-Agent: crane/v0.1.4 go-containerregistry/v0.1.4 -func NewUserAgent(inner http.RoundTripper, ua string) http.RoundTripper { - if ua == "" { - ua = ggcrVersion - } else { - ua = fmt.Sprintf("%s %s", ua, ggcrVersion) - } - return &userAgentTransport{ - inner: inner, - ua: ua, - } -} - -// RoundTrip implements http.RoundTripper -func (ut *userAgentTransport) RoundTrip(in *http.Request) (*http.Response, error) { - in.Header.Set("User-Agent", ut.ua) - return ut.inner.RoundTrip(in) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go deleted file mode 100644 index 04a3989a6e..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go +++ /dev/null @@ -1,713 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
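`write.go`, whose removal starts here, carried the whole push path: blob existence checks, upload initiation with cross-repo mounts, streaming, and commit, all reachable through `remote.Write`. A minimal push sketch, assuming an anonymously writable registry (ttl.sh is a public ephemeral one; the tag name is arbitrary):

```go
package main

import (
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/random"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("ttl.sh/ggcr-write-demo:1h")
	if err != nil {
		panic(err)
	}
	// A throwaway single-layer image; substitute a real v1.Image in practice.
	img, err := random.Image(1024, 1)
	if err != nil {
		panic(err)
	}
	if err := remote.Write(ref, img); err != nil {
		panic(err)
	}
}
```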
- -package remote - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "sort" - "strings" - "sync" - - "github.com/google/go-containerregistry/internal/redact" - "github.com/google/go-containerregistry/internal/retry" - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/logs" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/stream" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Taggable is an interface that enables a manifest PUT (e.g. for tagging). -type Taggable interface { - RawManifest() ([]byte, error) -} - -// Write pushes the provided img to the specified image reference. -func Write(ref name.Reference, img v1.Image, options ...Option) (rerr error) { - o, err := makeOptions(options...) - if err != nil { - return err - } - if o.progress != nil { - defer func() { o.progress.Close(rerr) }() - } - return newPusher(o).Push(o.context, ref, img) -} - -// writer writes the elements of an image to a remote image reference. -type writer struct { - repo name.Repository - auth authn.Authenticator - transport http.RoundTripper - - client *http.Client - - progress *progress - backoff Backoff - predicate retry.Predicate - - scopeLock sync.Mutex - // Keep track of scopes that we have already requested. - scopeSet map[string]struct{} - scopes []string -} - -func makeWriter(ctx context.Context, repo name.Repository, ls []v1.Layer, o *options) (*writer, error) { - auth := o.auth - if o.keychain != nil { - kauth, err := o.keychain.Resolve(repo) - if err != nil { - return nil, err - } - auth = kauth - } - scopes := scopesForUploadingImage(repo, ls) - tr, err := transport.NewWithContext(ctx, repo.Registry, auth, o.transport, scopes) - if err != nil { - return nil, err - } - - scopeSet := map[string]struct{}{} - for _, scope := range scopes { - scopeSet[scope] = struct{}{} - } - return &writer{ - repo: repo, - client: &http.Client{Transport: tr}, - auth: auth, - transport: o.transport, - progress: o.progress, - backoff: o.retryBackoff, - predicate: o.retryPredicate, - scopes: scopes, - scopeSet: scopeSet, - }, nil -} - -// url returns a url.Url for the specified path in the context of this remote image reference. -func (w *writer) url(path string) url.URL { - return url.URL{ - Scheme: w.repo.Registry.Scheme(), - Host: w.repo.RegistryStr(), - Path: path, - } -} - -func (w *writer) maybeUpdateScopes(ctx context.Context, ml *MountableLayer) error { - if ml.Reference.Context().String() == w.repo.String() { - return nil - } - if ml.Reference.Context().Registry.String() != w.repo.Registry.String() { - return nil - } - - scope := ml.Reference.Scope(transport.PullScope) - - w.scopeLock.Lock() - defer w.scopeLock.Unlock() - - if _, ok := w.scopeSet[scope]; !ok { - w.scopeSet[scope] = struct{}{} - w.scopes = append(w.scopes, scope) - - logs.Debug.Printf("Refreshing token to add scope %q", scope) - wt, err := transport.NewWithContext(ctx, w.repo.Registry, w.auth, w.transport, w.scopes) - if err != nil { - return err - } - w.client = &http.Client{Transport: wt} - } - - return nil -} - -// nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence. 
-func (w *writer) nextLocation(resp *http.Response) (string, error) { - loc := resp.Header.Get("Location") - if len(loc) == 0 { - return "", errors.New("missing Location header") - } - u, err := url.Parse(loc) - if err != nil { - return "", err - } - - // If the location header returned is just a url path, then fully qualify it. - // We cannot simply call w.url, since there might be an embedded query string. - return resp.Request.URL.ResolveReference(u).String(), nil -} - -// checkExistingBlob checks if a blob exists already in the repository by making a -// HEAD request to the blob store API. GCR performs an existence check on the -// initiation if "mount" is specified, even if no "from" sources are specified. -// However, this is not broadly applicable to all registries, e.g. ECR. -func (w *writer) checkExistingBlob(ctx context.Context, h v1.Hash) (bool, error) { - u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.repo.RepositoryStr(), h.String())) - - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return false, err - } - - resp, err := w.client.Do(req.WithContext(ctx)) - if err != nil { - return false, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { - return false, err - } - - return resp.StatusCode == http.StatusOK, nil -} - -// initiateUpload initiates the blob upload, which starts with a POST that can -// optionally include the hash of the layer and a list of repositories from -// which that layer might be read. On failure, an error is returned. -// On success, the layer was either mounted (nothing more to do) or a blob -// upload was initiated and the body of that blob should be sent to the returned -// location. -func (w *writer) initiateUpload(ctx context.Context, from, mount, origin string) (location string, mounted bool, err error) { - u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.repo.RepositoryStr())) - uv := url.Values{} - if mount != "" && from != "" { - // Quay will fail if we specify a "mount" without a "from". - uv.Set("mount", mount) - uv.Set("from", from) - if origin != "" { - uv.Set("origin", origin) - } - } - u.RawQuery = uv.Encode() - - // Make the request to initiate the blob upload. - req, err := http.NewRequest(http.MethodPost, u.String(), nil) - if err != nil { - return "", false, err - } - req.Header.Set("Content-Type", "application/json") - resp, err := w.client.Do(req.WithContext(ctx)) - if err != nil { - if from != "" { - // https://github.com/google/go-containerregistry/issues/1679 - logs.Warn.Printf("retrying without mount: %v", err) - return w.initiateUpload(ctx, "", "", "") - } - return "", false, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { - if from != "" { - // https://github.com/google/go-containerregistry/issues/1404 - logs.Warn.Printf("retrying without mount: %v", err) - return w.initiateUpload(ctx, "", "", "") - } - return "", false, err - } - - // Check the response code to determine the result. - switch resp.StatusCode { - case http.StatusCreated: - // We're done, we were able to fast-path. - return "", true, nil - case http.StatusAccepted: - // Proceed to PATCH, upload has begun. - loc, err := w.nextLocation(resp) - return loc, false, err - default: - panic("Unreachable: initiateUpload") - } -} - -// streamBlob streams the contents of the blob to the specified location. -// On failure, this will return an error. 
On success, this will return the location -// header indicating how to commit the streamed blob. -func (w *writer) streamBlob(ctx context.Context, layer v1.Layer, streamLocation string) (commitLocation string, rerr error) { - reset := func() {} - defer func() { - if rerr != nil { - reset() - } - }() - blob, err := layer.Compressed() - if err != nil { - return "", err - } - - getBody := layer.Compressed - if w.progress != nil { - var count int64 - blob = &progressReader{rc: blob, progress: w.progress, count: &count} - getBody = func() (io.ReadCloser, error) { - blob, err := layer.Compressed() - if err != nil { - return nil, err - } - return &progressReader{rc: blob, progress: w.progress, count: &count}, nil - } - reset = func() { - w.progress.complete(-count) - } - } - - req, err := http.NewRequest(http.MethodPatch, streamLocation, blob) - if err != nil { - return "", err - } - if _, ok := layer.(*stream.Layer); !ok { - // We can't retry streaming layers. - req.GetBody = getBody - - // If we know the size, set it. - if size, err := layer.Size(); err == nil { - req.ContentLength = size - } - } - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := w.client.Do(req.WithContext(ctx)) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil { - return "", err - } - - // The blob has been uploaded, return the location header indicating - // how to commit this layer. - return w.nextLocation(resp) -} - -// commitBlob commits this blob by sending a PUT to the location returned from -// streaming the blob. -func (w *writer) commitBlob(ctx context.Context, location, digest string) error { - u, err := url.Parse(location) - if err != nil { - return err - } - v := u.Query() - v.Set("digest", digest) - u.RawQuery = v.Encode() - - req, err := http.NewRequest(http.MethodPut, u.String(), nil) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := w.client.Do(req.WithContext(ctx)) - if err != nil { - return err - } - defer resp.Body.Close() - - return transport.CheckError(resp, http.StatusCreated) -} - -// incrProgress increments and sends a progress update, if WithProgress is used. -func (w *writer) incrProgress(written int64) { - if w.progress == nil { - return - } - w.progress.complete(written) -} - -// uploadOne performs a complete upload of a single layer. -func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error { - tryUpload := func() error { - ctx := retry.Never(ctx) - var from, mount, origin string - if h, err := l.Digest(); err == nil { - // If we know the digest, this isn't a streaming layer. Do an existence - // check so we can skip uploading the layer if possible. - existing, err := w.checkExistingBlob(ctx, h) - if err != nil { - return err - } - if existing { - size, err := l.Size() - if err != nil { - return err - } - w.incrProgress(size) - logs.Progress.Printf("existing blob: %v", h) - return nil - } - - mount = h.String() - } - if ml, ok := l.(*MountableLayer); ok { - if err := w.maybeUpdateScopes(ctx, ml); err != nil { - return err - } - - from = ml.Reference.Context().RepositoryStr() - origin = ml.Reference.Context().RegistryStr() - - // This keeps breaking with DockerHub. 
- // https://github.com/google/go-containerregistry/issues/1741 - if w.repo.RegistryStr() == name.DefaultRegistry && origin != w.repo.RegistryStr() { - from = "" - origin = "" - } - } - - location, mounted, err := w.initiateUpload(ctx, from, mount, origin) - if err != nil { - return err - } else if mounted { - size, err := l.Size() - if err != nil { - return err - } - w.incrProgress(size) - h, err := l.Digest() - if err != nil { - return err - } - logs.Progress.Printf("mounted blob: %s", h.String()) - return nil - } - - // Only log layers with +json or +yaml. We can let through other stuff if it becomes popular. - // TODO(opencontainers/image-spec#791): Would be great to have an actual parser. - mt, err := l.MediaType() - if err != nil { - return err - } - smt := string(mt) - if !(strings.HasSuffix(smt, "+json") || strings.HasSuffix(smt, "+yaml")) { - ctx = redact.NewContext(ctx, "omitting binary blobs from logs") - } - - location, err = w.streamBlob(ctx, l, location) - if err != nil { - return err - } - - h, err := l.Digest() - if err != nil { - return err - } - digest := h.String() - - if err := w.commitBlob(ctx, location, digest); err != nil { - return err - } - logs.Progress.Printf("pushed blob: %s", digest) - return nil - } - - return retry.Retry(tryUpload, w.predicate, w.backoff) -} - -type withMediaType interface { - MediaType() (types.MediaType, error) -} - -// This is really silly, but go interfaces don't let me satisfy remote.Taggable -// with remote.Descriptor because of name collisions between method names and -// struct fields. -// -// Use reflection to either pull the v1.Descriptor out of remote.Descriptor or -// create a descriptor based on the RawManifest and (optionally) MediaType. -func unpackTaggable(t Taggable) ([]byte, *v1.Descriptor, error) { - if d, ok := t.(*Descriptor); ok { - return d.Manifest, &d.Descriptor, nil - } - b, err := t.RawManifest() - if err != nil { - return nil, nil, err - } - - // A reasonable default if Taggable doesn't implement MediaType. - mt := types.DockerManifestSchema2 - - if wmt, ok := t.(withMediaType); ok { - m, err := wmt.MediaType() - if err != nil { - return nil, nil, err - } - mt = m - } - - h, sz, err := v1.SHA256(bytes.NewReader(b)) - if err != nil { - return nil, nil, err - } - - return b, &v1.Descriptor{ - MediaType: mt, - Size: sz, - Digest: h, - }, nil -} - -// commitSubjectReferrers is responsible for updating the fallback tag manifest to track descriptors referring to a subject for registries that don't yet support the Referrers API. -// TODO: use conditional requests to avoid race conditions -func (w *writer) commitSubjectReferrers(ctx context.Context, sub name.Digest, add v1.Descriptor) error { - // Check if the registry supports Referrers API. - // TODO: This should be done once per registry, not once per subject. - u := w.url(fmt.Sprintf("/v2/%s/referrers/%s", w.repo.RepositoryStr(), sub.DigestStr())) - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return err - } - req.Header.Set("Accept", string(types.OCIImageIndex)) - resp, err := w.client.Do(req.WithContext(ctx)) - if err != nil { - return err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest); err != nil { - return err - } - if resp.StatusCode == http.StatusOK { - // The registry supports Referrers API. The registry is responsible for updating the referrers list. 
- return nil - } - - // The registry doesn't support Referrers API, we need to update the manifest tagged with the fallback tag. - // Make the request to GET the current manifest. - t := fallbackTag(sub) - u = w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), t.Identifier())) - req, err = http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return err - } - req.Header.Set("Accept", string(types.OCIImageIndex)) - resp, err = w.client.Do(req.WithContext(ctx)) - if err != nil { - return err - } - defer resp.Body.Close() - - var im v1.IndexManifest - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { - return err - } else if resp.StatusCode == http.StatusNotFound { - // Not found just means there are no attachments. Start with an empty index. - im = v1.IndexManifest{ - SchemaVersion: 2, - MediaType: types.OCIImageIndex, - Manifests: []v1.Descriptor{add}, - } - } else { - if err := json.NewDecoder(resp.Body).Decode(&im); err != nil { - return err - } - if im.SchemaVersion != 2 { - return fmt.Errorf("fallback tag manifest is not a schema version 2: %d", im.SchemaVersion) - } - if im.MediaType != types.OCIImageIndex { - return fmt.Errorf("fallback tag manifest is not an OCI image index: %s", im.MediaType) - } - for _, desc := range im.Manifests { - if desc.Digest == add.Digest { - // The digest is already attached, nothing to do. - logs.Progress.Printf("fallback tag %s already had referrer", t.Identifier()) - return nil - } - } - // Append the new descriptor to the index. - im.Manifests = append(im.Manifests, add) - } - - // Sort the manifests for reproducibility. - sort.Slice(im.Manifests, func(i, j int) bool { - return im.Manifests[i].Digest.String() < im.Manifests[j].Digest.String() - }) - logs.Progress.Printf("updating fallback tag %s with new referrer", t.Identifier()) - return w.commitManifest(ctx, fallbackTaggable{im}, t) -} - -type fallbackTaggable struct { - im v1.IndexManifest -} - -func (f fallbackTaggable) RawManifest() ([]byte, error) { return json.Marshal(f.im) } -func (f fallbackTaggable) MediaType() (types.MediaType, error) { return types.OCIImageIndex, nil } - -// commitManifest does a PUT of the image's manifest. -func (w *writer) commitManifest(ctx context.Context, t Taggable, ref name.Reference) error { - // If the manifest refers to a subject, we need to check whether we need to update the fallback tag manifest. 
- raw, err := t.RawManifest() - if err != nil { - return err - } - var mf struct { - MediaType types.MediaType `json:"mediaType"` - Subject *v1.Descriptor `json:"subject,omitempty"` - Config struct { - MediaType types.MediaType `json:"mediaType"` - } `json:"config"` - } - if err := json.Unmarshal(raw, &mf); err != nil { - return err - } - - tryUpload := func() error { - ctx := retry.Never(ctx) - raw, desc, err := unpackTaggable(t) - if err != nil { - return err - } - - u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), ref.Identifier())) - - // Make the request to PUT the serialized manifest - req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewBuffer(raw)) - if err != nil { - return err - } - req.Header.Set("Content-Type", string(desc.MediaType)) - - resp, err := w.client.Do(req.WithContext(ctx)) - if err != nil { - return err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil { - return err - } - - // If the manifest referred to a subject, we may need to update the fallback tag manifest. - // TODO: If this fails, we'll retry the whole upload. We should retry just this part. - if mf.Subject != nil { - h, size, err := v1.SHA256(bytes.NewReader(raw)) - if err != nil { - return err - } - desc := v1.Descriptor{ - ArtifactType: string(mf.Config.MediaType), - MediaType: mf.MediaType, - Digest: h, - Size: size, - } - if err := w.commitSubjectReferrers(ctx, - ref.Context().Digest(mf.Subject.Digest.String()), - desc); err != nil { - return err - } - } - - // The image was successfully pushed! - logs.Progress.Printf("%v: digest: %v size: %d", ref, desc.Digest, desc.Size) - w.incrProgress(int64(len(raw))) - return nil - } - - return retry.Retry(tryUpload, w.predicate, w.backoff) -} - -func scopesForUploadingImage(repo name.Repository, layers []v1.Layer) []string { - // use a map as set to remove duplicates scope strings - scopeSet := map[string]struct{}{} - - for _, l := range layers { - if ml, ok := l.(*MountableLayer); ok { - // we will add push scope for ref.Context() after the loop. - // for now we ask pull scope for references of the same registry - if ml.Reference.Context().String() != repo.String() && ml.Reference.Context().Registry.String() == repo.Registry.String() { - scopeSet[ml.Reference.Scope(transport.PullScope)] = struct{}{} - } - } - } - - scopes := make([]string, 0) - // Push scope should be the first element because a few registries just look at the first scope to determine access. - scopes = append(scopes, repo.Scope(transport.PushScope)) - - for scope := range scopeSet { - scopes = append(scopes, scope) - } - - return scopes -} - -// WriteIndex pushes the provided ImageIndex to the specified image reference. -// WriteIndex will attempt to push all of the referenced manifests before -// attempting to push the ImageIndex, to retain referential integrity. -func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr error) { - o, err := makeOptions(options...) - if err != nil { - return err - } - if o.progress != nil { - defer func() { o.progress.Close(rerr) }() - } - return newPusher(o).Push(o.context, ref, ii) -} - -// WriteLayer uploads the provided Layer to the specified repo. -func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr error) { - o, err := makeOptions(options...) 
- if err != nil { - return err - } - if o.progress != nil { - defer func() { o.progress.Close(rerr) }() - } - return newPusher(o).Upload(o.context, repo, layer) -} - -// Tag adds a tag to the given Taggable via PUT /v2/.../manifests/ -// -// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and -// remote.Descriptor. -// -// If t implements MediaType, we will use that for the Content-Type, otherwise -// we will default to types.DockerManifestSchema2. -// -// Tag does not attempt to write anything other than the manifest, so callers -// should ensure that all blobs or manifests that are referenced by t exist -// in the target registry. -func Tag(tag name.Tag, t Taggable, options ...Option) error { - return Put(tag, t, options...) -} - -// Put adds a manifest from the given Taggable via PUT /v2/.../manifests/ -// -// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and -// remote.Descriptor. -// -// If t implements MediaType, we will use that for the Content-Type, otherwise -// we will default to types.DockerManifestSchema2. -// -// Put does not attempt to write anything other than the manifest, so callers -// should ensure that all blobs or manifests that are referenced by t exist -// in the target registry. -func Put(ref name.Reference, t Taggable, options ...Option) error { - o, err := makeOptions(options...) - if err != nil { - return err - } - return newPusher(o).Push(o.context, ref, t) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/static/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/static/layer.go deleted file mode 100644 index a4bbe69487..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/static/layer.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2021 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package static - -import ( - "bytes" - "io" - "sync" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// NewLayer returns a layer containing the given bytes, with the given mediaType. -// -// Contents will not be compressed. -func NewLayer(b []byte, mt types.MediaType) v1.Layer { - return &staticLayer{b: b, mt: mt} -} - -type staticLayer struct { - b []byte - mt types.MediaType - - once sync.Once - h v1.Hash -} - -func (l *staticLayer) Digest() (v1.Hash, error) { - var err error - // Only calculate digest the first time we're asked.
- l.once.Do(func() { - l.h, _, err = v1.SHA256(bytes.NewReader(l.b)) - }) - return l.h, err -} - -func (l *staticLayer) DiffID() (v1.Hash, error) { - return l.Digest() -} - -func (l *staticLayer) Compressed() (io.ReadCloser, error) { - return io.NopCloser(bytes.NewReader(l.b)), nil -} - -func (l *staticLayer) Uncompressed() (io.ReadCloser, error) { - return io.NopCloser(bytes.NewReader(l.b)), nil -} - -func (l *staticLayer) Size() (int64, error) { - return int64(len(l.b)), nil -} - -func (l *staticLayer) MediaType() (types.MediaType, error) { - return l.mt, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md deleted file mode 100644 index da0dda48d9..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# `stream` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream) - -The `stream` package contains an implementation of -[`v1.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1#Layer) -that supports _streaming_ access, i.e. the layer contents are read once and not -buffered. - -## Usage - -```go -package main - -import ( - "os" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/google/go-containerregistry/pkg/v1/stream" -) - -// upload the contents of stdin as a layer to a local registry -func main() { - repo, err := name.NewRepository("localhost:5000/stream") - if err != nil { - panic(err) - } - - layer := stream.NewLayer(os.Stdin) - - if err := remote.WriteLayer(repo, layer); err != nil { - panic(err) - } -} -``` - -## Structure - -This implements the layer portion of an [image -upload](/pkg/v1/remote#anatomy-of-an-image-upload). We launch a goroutine that -is responsible for hashing the uncompressed contents to compute the `DiffID`, -gzipping them to produce the `Compressed` contents, and hashing/counting the -bytes to produce the `Digest`/`Size`. This goroutine writes to an -`io.PipeWriter`, which blocks until `Compressed` reads the gzipped contents from -the corresponding `io.PipeReader`. - -
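That pipeline can be illustrated with the standard library alone. The sketch below mirrors the single-pass idea (hash the raw bytes for the `DiffID`; hash and count the gzipped bytes for the `Digest` and `Size`) without the buffering and error bookkeeping of the real implementation:

```go
package main

import (
	"compress/gzip"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("pretend this is an uncompressed layer tarball")

	diffID := sha256.New() // hash of the uncompressed bytes
	digest := sha256.New() // hash of the compressed bytes

	pr, pw := io.Pipe()
	go func() {
		// Everything the gzip writer emits also feeds the digest hasher.
		zw := gzip.NewWriter(io.MultiWriter(pw, digest))
		// Tee the raw bytes into the diffID hasher while compressing.
		_, err := io.Copy(zw, io.TeeReader(src, diffID))
		if err == nil {
			err = zw.Close()
		}
		pw.CloseWithError(err)
	}()

	// The consumer drains the compressed stream exactly once; counting
	// the bytes here yields the layer's Size.
	size, err := io.Copy(io.Discard, pr)
	if err != nil {
		panic(err)
	}

	fmt.Println("diffid: sha256:" + hex.EncodeToString(diffID.Sum(nil)))
	fmt.Println("digest: sha256:" + hex.EncodeToString(digest.Sum(nil)))
	fmt.Println("size:  ", size)
}
```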
- -## Caveats - -This assumes that you have an uncompressed layer (i.e. a tarball) and would like -to compress it. Calling `Uncompressed` is always an error. Likewise, other -methods are invalid until the contents of `Compressed` have been completely -consumed and `Close`d. - -Using a `stream.Layer` will likely not work without careful consideration. For -example, in the `mutate` package, we defer computing the manifest and config -file until they are actually called. This allows you to `mutate.Append` a -streaming layer to an image without accidentally consuming it. Similarly, in -`remote.Write`, if calling `Digest` on a layer fails, we attempt to upload the -layer anyway, understanding that we may be dealing with a `stream.Layer` whose -contents need to be uploaded before we can upload the config file. - -Given the [structure](#structure) of how this is implemented, forgetting to -`Close` a `stream.Layer` will leak a goroutine. diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go deleted file mode 100644 index 2b03544795..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package stream implements a single-pass streaming v1.Layer. -package stream - -import ( - "bufio" - "compress/gzip" - "crypto" - "encoding/hex" - "errors" - "hash" - "io" - "os" - "sync" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -var ( - // ErrNotComputed is returned when the requested value is not yet - // computed because the stream has not been consumed yet. - ErrNotComputed = errors.New("value not computed until stream is consumed") - - // ErrConsumed is returned by Compressed when the underlying stream has - // already been consumed and closed. - ErrConsumed = errors.New("stream was already consumed") -) - -// Layer is a streaming implementation of v1.Layer. -type Layer struct { - blob io.ReadCloser - consumed bool - compression int - - mu sync.Mutex - digest, diffID *v1.Hash - size int64 - mediaType types.MediaType -} - -var _ v1.Layer = (*Layer)(nil) - -// LayerOption applies options to layer -type LayerOption func(*Layer) - -// WithCompressionLevel sets the gzip compression. See `gzip.NewWriterLevel` for possible values. -func WithCompressionLevel(level int) LayerOption { - return func(l *Layer) { - l.compression = level - } -} - -// WithMediaType is a functional option for overriding the layer's media type. -func WithMediaType(mt types.MediaType) LayerOption { - return func(l *Layer) { - l.mediaType = mt - } -} - -// NewLayer creates a Layer from an io.ReadCloser. 
-func NewLayer(rc io.ReadCloser, opts ...LayerOption) *Layer { - layer := &Layer{ - blob: rc, - compression: gzip.BestSpeed, - // We use DockerLayer for now as uncompressed layers - // are unimplemented - mediaType: types.DockerLayer, - } - - for _, opt := range opts { - opt(layer) - } - - return layer -} - -// Digest implements v1.Layer. -func (l *Layer) Digest() (v1.Hash, error) { - l.mu.Lock() - defer l.mu.Unlock() - if l.digest == nil { - return v1.Hash{}, ErrNotComputed - } - return *l.digest, nil -} - -// DiffID implements v1.Layer. -func (l *Layer) DiffID() (v1.Hash, error) { - l.mu.Lock() - defer l.mu.Unlock() - if l.diffID == nil { - return v1.Hash{}, ErrNotComputed - } - return *l.diffID, nil -} - -// Size implements v1.Layer. -func (l *Layer) Size() (int64, error) { - l.mu.Lock() - defer l.mu.Unlock() - if l.size == 0 { - return 0, ErrNotComputed - } - return l.size, nil -} - -// MediaType implements v1.Layer -func (l *Layer) MediaType() (types.MediaType, error) { - return l.mediaType, nil -} - -// Uncompressed implements v1.Layer. -func (l *Layer) Uncompressed() (io.ReadCloser, error) { - return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented") -} - -// Compressed implements v1.Layer. -func (l *Layer) Compressed() (io.ReadCloser, error) { - l.mu.Lock() - defer l.mu.Unlock() - if l.consumed { - return nil, ErrConsumed - } - return newCompressedReader(l) -} - -// finalize sets the layer to consumed and computes all hash and size values. -func (l *Layer) finalize(uncompressed, compressed hash.Hash, size int64) error { - l.mu.Lock() - defer l.mu.Unlock() - - diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(uncompressed.Sum(nil))) - if err != nil { - return err - } - l.diffID = &diffID - - digest, err := v1.NewHash("sha256:" + hex.EncodeToString(compressed.Sum(nil))) - if err != nil { - return err - } - l.digest = &digest - - l.size = size - l.consumed = true - return nil -} - -type compressedReader struct { - pr io.Reader - closer func() error -} - -func newCompressedReader(l *Layer) (*compressedReader, error) { - // Collect digests of compressed and uncompressed stream and size of - // compressed stream. - h := crypto.SHA256.New() - zh := crypto.SHA256.New() - count := &countWriter{} - - // gzip.Writer writes to the output stream via pipe, a hasher to - // capture compressed digest, and a countWriter to capture compressed - // size. - pr, pw := io.Pipe() - - // Write compressed bytes to be read by the pipe.Reader, hashed by zh, and counted by count. - mw := io.MultiWriter(pw, zh, count) - - // Buffer the output of the gzip writer so we don't have to wait on pr to keep writing. - // 64K ought to be small enough for anybody. - bw := bufio.NewWriterSize(mw, 2<<16) - zw, err := gzip.NewWriterLevel(bw, l.compression) - if err != nil { - return nil, err - } - - doneDigesting := make(chan struct{}) - - cr := &compressedReader{ - pr: pr, - closer: func() error { - // Immediately close pw without error. There are three ways to get - // here. - // - // 1. There was a copy error due from the underlying reader, in which - // case the error will not be overwritten. - // 2. Copying from the underlying reader completed successfully. - // 3. Close has been called before the underlying reader has been - // fully consumed. In this case pw must be closed in order to - // keep the flush of bw from blocking indefinitely. - // - // NOTE: pw.Close never returns an error. The signature is only to - // implement io.Closer. - _ = pw.Close() - - // Close the inner ReadCloser. 
- // -// NOTE: net/http will call close on success, so if we've already -// closed the inner rc, it's not an error. - if err := l.blob.Close(); err != nil && !errors.Is(err, os.ErrClosed) { - return err - } - - // Finalize layer with its digest and size values. - <-doneDigesting - return l.finalize(h, zh, count.n) - }, - } - go func() { - // Copy blob into the gzip writer, which also hashes and counts the - // size of the compressed output, and hasher of the raw contents. - _, copyErr := io.Copy(io.MultiWriter(h, zw), l.blob) - - // Close the gzip writer once copying is done. If this is done in the - // Close method of compressedReader instead, then it can cause a panic - // when the compressedReader is closed before the blob is fully - // consumed and io.Copy in this goroutine is still blocking. - closeErr := zw.Close() - - // Check errors from writing and closing streams. - if copyErr != nil { - close(doneDigesting) - pw.CloseWithError(copyErr) - return - } - if closeErr != nil { - close(doneDigesting) - pw.CloseWithError(closeErr) - return - } - - // Flush the buffer once all writes are complete to the gzip writer. - if err := bw.Flush(); err != nil { - close(doneDigesting) - pw.CloseWithError(err) - return - } - - // Notify closer that digests are done being written. - close(doneDigesting) - - // Close the compressed reader to calculate digest/diffID/size. This - // will cause pr to return EOF which will cause readers of the - // Compressed stream to finish reading. - pw.CloseWithError(cr.Close()) - }() - - return cr, nil -} - -func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) } - -func (cr *compressedReader) Close() error { return cr.closer() } - -// countWriter counts bytes written to it. -type countWriter struct{ n int64 } - -func (c *countWriter) Write(p []byte) (int, error) { - c.n += int64(len(p)) - return len(p), nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md deleted file mode 100644 index 03f339b063..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md +++ /dev/null @@ -1,280 +0,0 @@ -# `tarball` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball) - -This package produces tarballs that can be consumed via `docker load`. Note -that this is a _different_ format from the [`legacy`](/pkg/legacy/tarball) -tarballs that are produced by `docker save`, but this package is still able to -read the legacy tarballs produced by `docker save`. - -## Usage - -```go -package main - -import ( - "os" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/tarball" -) - -func main() { - // Read a tarball from os.Args[1] that contains ubuntu. - tag, err := name.NewTag("ubuntu") - if err != nil { - panic(err) - } - img, err := tarball.ImageFromPath(os.Args[1], &tag) - if err != nil { - panic(err) - } - - // Write that tarball to os.Args[2] with a different tag. - newTag, err := name.NewTag("ubuntu:newest") - if err != nil { - panic(err) - } - f, err := os.Create(os.Args[2]) - if err != nil { - panic(err) - } - defer f.Close() - - if err := tarball.Write(newTag, img, f); err != nil { - panic(err) - } -} -``` - -## Structure -
- -Let's look at what happens when we write out a tarball: - - -### `ubuntu:latest` - -``` -$ crane pull ubuntu ubuntu.tar && mkdir ubuntu && tar xf ubuntu.tar -C ubuntu && rm ubuntu.tar -$ tree ubuntu/ -ubuntu/ -├── 423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954.tar.gz -├── b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7.tar.gz -├── de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d.tar.gz -├── f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b.tar.gz -├── manifest.json -└── sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c - -0 directories, 6 files -``` - -There are a couple interesting files here. - -`manifest.json` is the entrypoint: a list of [`tarball.Descriptor`s](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball#Descriptor) -that describe the images contained in this tarball. - -For each image, this has the `RepoTags` (how it was pulled), a `Config` file -that points to the image's config file, a list of `Layers`, and (optionally) -`LayerSources`. - -``` -$ jq < ubuntu/manifest.json -[ - { - "Config": "sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c", - "RepoTags": [ - "ubuntu" - ], - "Layers": [ - "423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954.tar.gz", - "de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d.tar.gz", - "f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b.tar.gz", - "b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7.tar.gz" - ] - } -] -``` - -The config file and layers are exactly what you would expect, and match the -registry representations of the same artifacts. You'll notice that the -`manifest.json` contains similar information as the registry manifest, but isn't -quite the same: - -``` -$ crane manifest ubuntu@sha256:0925d086715714114c1988f7c947db94064fd385e171a63c07730f1fa014e6f9 -{ - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": { - "mediaType": "application/vnd.docker.container.image.v1+json", - "size": 3408, - "digest": "sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c" - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 26692096, - "digest": "sha256:423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 35365, - "digest": "sha256:de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 852, - "digest": "sha256:f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 163, - "digest": "sha256:b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7" - } - ] -} -``` - -This makes it difficult to maintain image digests when roundtripping images -through the tarball format, so it's not a great format if you care about -provenance. - -The ubuntu example didn't have any `LayerSources` -- let's look at another image -that does. 
- -### `hello-world:nanoserver` - -``` -$ crane pull hello-world:nanoserver@sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b nanoserver.tar -$ mkdir nanoserver && tar xf nanoserver.tar -C nanoserver && rm nanoserver.tar -$ tree nanoserver/ -nanoserver/ -├── 10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0.tar.gz -├── a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053.tar.gz -├── be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a.tar.gz -├── manifest.json -└── sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6 - -0 directories, 5 files - -$ jq < nanoserver/manifest.json -[ - { - "Config": "sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6", - "RepoTags": [ - "index.docker.io/library/hello-world:i-was-a-digest" - ], - "Layers": [ - "a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053.tar.gz", - "be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a.tar.gz", - "10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0.tar.gz" - ], - "LayerSources": { - "sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e": { - "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", - "size": 101145811, - "digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053", - "urls": [ - "https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053" - ] - } - } - } -] -``` - -A couple things to note about this `manifest.json` versus the other: -* The `RepoTags` field is a bit weird here. `hello-world` is a multi-platform - image, so We had to pull this image by digest, since we're (I'm) on - amd64/linux and wanted to grab a windows image. Since the tarball format - expects a tag under `RepoTags`, and we didn't pull by tag, we replace the - digest with a sentinel `i-was-a-digest` "tag" to appease docker. -* The `LayerSources` has enough information to reconstruct the foreign layers - pointer when pushing/pulling from the registry. For legal reasons, microsoft - doesn't want anyone but them to serve windows base images, so the mediaType - here indicates a "foreign" or "non-distributable" layer with an URL for where - you can download it from microsoft (see the [OCI - image-spec](https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers)). - -We can look at what's in the registry to explain both of these things: -``` -$ crane manifest hello-world:nanoserver | jq . -{ - "manifests": [ - { - "digest": "sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b", - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "platform": { - "architecture": "amd64", - "os": "windows", - "os.version": "10.0.17763.1040" - }, - "size": 1124 - } - ], - "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", - "schemaVersion": 2 -} - - -# Note the media type and "urls" field. -$ crane manifest hello-world:nanoserver@sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b | jq . 
-{ - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": { - "mediaType": "application/vnd.docker.container.image.v1+json", - "size": 1721, - "digest": "sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6" - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", - "size": 101145811, - "digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053", - "urls": [ - "https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053" - ] - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 1669, - "digest": "sha256:be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 949, - "digest": "sha256:10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0" - } - ] -} -``` - -The `LayerSources` map is keyed by the diffid. Note that `sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e` matches the first layer in the config file: -``` -$ jq '.[0].LayerSources' < nanoserver/manifest.json -{ - "sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e": { - "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", - "size": 101145811, - "digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053", - "urls": [ - "https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053" - ] - } -} - -$ jq < nanoserver/sha256\:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6 | jq .rootfs -{ - "type": "layers", - "diff_ids": [ - "sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e", - "sha256:601cf7d78c62e4b4d32a7bbf96a17606a9cea5bd9d22ffa6f34aa431d056b0e8", - "sha256:a1e1a3bf6529adcce4d91dce2cad86c2604a66b507ccbc4d2239f3da0ec5aab9" - ] -} -``` diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go deleted file mode 100644 index 4eb79bb4e5..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tarball provides facilities for reading/writing v1.Images from/to -// a tarball on-disk. -package tarball diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go deleted file mode 100644 index aba609deac..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go +++ /dev/null @@ -1,440 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tarball - -import ( - "archive/tar" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "path" - "path/filepath" - "sync" - - comp "github.com/google/go-containerregistry/internal/compression" - "github.com/google/go-containerregistry/pkg/compression" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -type image struct { - opener Opener - manifest *Manifest - config []byte - imgDescriptor *Descriptor - - tag *name.Tag -} - -type uncompressedImage struct { - *image -} - -type compressedImage struct { - *image - manifestLock sync.Mutex // Protects manifest - manifest *v1.Manifest -} - -var _ partial.UncompressedImageCore = (*uncompressedImage)(nil) -var _ partial.CompressedImageCore = (*compressedImage)(nil) - -// Opener is a thunk for opening a tar file. -type Opener func() (io.ReadCloser, error) - -func pathOpener(path string) Opener { - return func() (io.ReadCloser, error) { - return os.Open(path) - } -} - -// ImageFromPath returns a v1.Image from a tarball located on path. -func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) { - return Image(pathOpener(path), tag) -} - -// LoadManifest load manifest -func LoadManifest(opener Opener) (Manifest, error) { - m, err := extractFileFromTar(opener, "manifest.json") - if err != nil { - return nil, err - } - defer m.Close() - - var manifest Manifest - - if err := json.NewDecoder(m).Decode(&manifest); err != nil { - return nil, err - } - return manifest, nil -} - -// Image exposes an image from the tarball at the provided path. -func Image(opener Opener, tag *name.Tag) (v1.Image, error) { - img := &image{ - opener: opener, - tag: tag, - } - if err := img.loadTarDescriptorAndConfig(); err != nil { - return nil, err - } - - // Peek at the first layer and see if it's compressed. - if len(img.imgDescriptor.Layers) > 0 { - compressed, err := img.areLayersCompressed() - if err != nil { - return nil, err - } - if compressed { - c := compressedImage{ - image: img, - } - return partial.CompressedToImage(&c) - } - } - - uc := uncompressedImage{ - image: img, - } - return partial.UncompressedToImage(&uc) -} - -func (i *image) MediaType() (types.MediaType, error) { - return types.DockerManifestSchema2, nil -} - -// Descriptor stores the manifest data for a single image inside a `docker save` tarball. -type Descriptor struct { - Config string - RepoTags []string - Layers []string - - // Tracks foreign layer info. Key is DiffID. - LayerSources map[v1.Hash]v1.Descriptor `json:",omitempty"` -} - -// Manifest represents the manifests of all images as the `manifest.json` file in a `docker save` tarball. 
-type Manifest []Descriptor - -func (m Manifest) findDescriptor(tag *name.Tag) (*Descriptor, error) { - if tag == nil { - if len(m) != 1 { - return nil, errors.New("tarball must contain only a single image to be used with tarball.Image") - } - return &(m)[0], nil - } - for _, img := range m { - for _, tagStr := range img.RepoTags { - repoTag, err := name.NewTag(tagStr) - if err != nil { - return nil, err - } - - // Compare the resolved names, since there are several ways to specify the same tag. - if repoTag.Name() == tag.Name() { - return &img, nil - } - } - } - return nil, fmt.Errorf("tag %s not found in tarball", tag) -} - -func (i *image) areLayersCompressed() (bool, error) { - if len(i.imgDescriptor.Layers) == 0 { - return false, errors.New("0 layers found in image") - } - layer := i.imgDescriptor.Layers[0] - blob, err := extractFileFromTar(i.opener, layer) - if err != nil { - return false, err - } - defer blob.Close() - - cp, _, err := comp.PeekCompression(blob) - if err != nil { - return false, err - } - - return cp != compression.None, nil -} - -func (i *image) loadTarDescriptorAndConfig() error { - m, err := extractFileFromTar(i.opener, "manifest.json") - if err != nil { - return err - } - defer m.Close() - - if err := json.NewDecoder(m).Decode(&i.manifest); err != nil { - return err - } - - if i.manifest == nil { - return errors.New("no valid manifest.json in tarball") - } - - i.imgDescriptor, err = i.manifest.findDescriptor(i.tag) - if err != nil { - return err - } - - cfg, err := extractFileFromTar(i.opener, i.imgDescriptor.Config) - if err != nil { - return err - } - defer cfg.Close() - - i.config, err = io.ReadAll(cfg) - if err != nil { - return err - } - return nil -} - -func (i *image) RawConfigFile() ([]byte, error) { - return i.config, nil -} - -// tarFile represents a single file inside a tar. Closing it closes the tar itself. -type tarFile struct { - io.Reader - io.Closer -} - -func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) { - f, err := opener() - if err != nil { - return nil, err - } - needClose := true - defer func() { - if needClose { - f.Close() - } - }() - - tf := tar.NewReader(f) - for { - hdr, err := tf.Next() - if errors.Is(err, io.EOF) { - break - } - if err != nil { - return nil, err - } - if hdr.Name == filePath { - if hdr.Typeflag == tar.TypeSymlink || hdr.Typeflag == tar.TypeLink { - currentDir := filepath.Dir(filePath) - return extractFileFromTar(opener, path.Join(currentDir, path.Clean(hdr.Linkname))) - } - needClose = false - return tarFile{ - Reader: tf, - Closer: f, - }, nil - } - } - return nil, fmt.Errorf("file %s not found in tar", filePath) -} - -// uncompressedLayerFromTarball implements partial.UncompressedLayer -type uncompressedLayerFromTarball struct { - diffID v1.Hash - mediaType types.MediaType - opener Opener - filePath string -} - -// foreignUncompressedLayer implements partial.UncompressedLayer but returns -// a custom descriptor. This allows the foreign layer URLs to be included in -// the generated image manifest for uncompressed layers. 
-type foreignUncompressedLayer struct { - uncompressedLayerFromTarball - desc v1.Descriptor -} - -func (fl *foreignUncompressedLayer) Descriptor() (*v1.Descriptor, error) { - return &fl.desc, nil -} - -// DiffID implements partial.UncompressedLayer -func (ulft *uncompressedLayerFromTarball) DiffID() (v1.Hash, error) { - return ulft.diffID, nil -} - -// Uncompressed implements partial.UncompressedLayer -func (ulft *uncompressedLayerFromTarball) Uncompressed() (io.ReadCloser, error) { - return extractFileFromTar(ulft.opener, ulft.filePath) -} - -func (ulft *uncompressedLayerFromTarball) MediaType() (types.MediaType, error) { - return ulft.mediaType, nil -} - -func (i *uncompressedImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { - cfg, err := partial.ConfigFile(i) - if err != nil { - return nil, err - } - for idx, diffID := range cfg.RootFS.DiffIDs { - if diffID == h { - // Technically the media type should be 'application/tar' but given that our - // v1.Layer doesn't force consumers to care about whether the layer is compressed - // we should be fine returning the DockerLayer media type - mt := types.DockerLayer - bd, ok := i.imgDescriptor.LayerSources[h] - if ok { - // This is janky, but we don't want to implement Descriptor for - // uncompressed layers because it breaks a bunch of assumptions in partial. - // See https://github.com/google/go-containerregistry/issues/1870 - docker25workaround := bd.MediaType == types.DockerUncompressedLayer || bd.MediaType == types.OCIUncompressedLayer - - if !docker25workaround { - // Overwrite the mediaType for foreign layers. - return &foreignUncompressedLayer{ - uncompressedLayerFromTarball: uncompressedLayerFromTarball{ - diffID: diffID, - mediaType: bd.MediaType, - opener: i.opener, - filePath: i.imgDescriptor.Layers[idx], - }, - desc: bd, - }, nil - } - - // Intentional fall through. - } - - return &uncompressedLayerFromTarball{ - diffID: diffID, - mediaType: mt, - opener: i.opener, - filePath: i.imgDescriptor.Layers[idx], - }, nil - } - } - return nil, fmt.Errorf("diff id %q not found", h) -} - -func (c *compressedImage) Manifest() (*v1.Manifest, error) { - c.manifestLock.Lock() - defer c.manifestLock.Unlock() - if c.manifest != nil { - return c.manifest, nil - } - - b, err := c.RawConfigFile() - if err != nil { - return nil, err - } - - cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) - if err != nil { - return nil, err - } - - c.manifest = &v1.Manifest{ - SchemaVersion: 2, - MediaType: types.DockerManifestSchema2, - Config: v1.Descriptor{ - MediaType: types.DockerConfigJSON, - Size: cfgSize, - Digest: cfgHash, - }, - } - - for i, p := range c.imgDescriptor.Layers { - cfg, err := partial.ConfigFile(c) - if err != nil { - return nil, err - } - diffid := cfg.RootFS.DiffIDs[i] - if d, ok := c.imgDescriptor.LayerSources[diffid]; ok { - // If it's a foreign layer, just append the descriptor so we can avoid - // reading the entire file. 
- c.manifest.Layers = append(c.manifest.Layers, d) - } else { - l, err := extractFileFromTar(c.opener, p) - if err != nil { - return nil, err - } - defer l.Close() - sha, size, err := v1.SHA256(l) - if err != nil { - return nil, err - } - c.manifest.Layers = append(c.manifest.Layers, v1.Descriptor{ - MediaType: types.DockerLayer, - Size: size, - Digest: sha, - }) - } - } - return c.manifest, nil -} - -func (c *compressedImage) RawManifest() ([]byte, error) { - return partial.RawManifest(c) -} - -// compressedLayerFromTarball implements partial.CompressedLayer -type compressedLayerFromTarball struct { - desc v1.Descriptor - opener Opener - filePath string -} - -// Digest implements partial.CompressedLayer -func (clft *compressedLayerFromTarball) Digest() (v1.Hash, error) { - return clft.desc.Digest, nil -} - -// Compressed implements partial.CompressedLayer -func (clft *compressedLayerFromTarball) Compressed() (io.ReadCloser, error) { - return extractFileFromTar(clft.opener, clft.filePath) -} - -// MediaType implements partial.CompressedLayer -func (clft *compressedLayerFromTarball) MediaType() (types.MediaType, error) { - return clft.desc.MediaType, nil -} - -// Size implements partial.CompressedLayer -func (clft *compressedLayerFromTarball) Size() (int64, error) { - return clft.desc.Size, nil -} - -func (c *compressedImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { - m, err := c.Manifest() - if err != nil { - return nil, err - } - for i, l := range m.Layers { - if l.Digest == h { - fp := c.imgDescriptor.Layers[i] - return &compressedLayerFromTarball{ - desc: l, - opener: c.opener, - filePath: fp, - }, nil - } - } - return nil, fmt.Errorf("blob %v not found", h) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go deleted file mode 100644 index 8a26309618..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
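The layer.go file removed below provides `tarball.LayerFromFile` and its functional options. A minimal sketch of typical usage, assuming the canonical (non-vendored) import path and an illustrative file name:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1/tarball"
)

func main() {
	// "layer.tar.gz" is illustrative; LayerFromFile peeks at the file to
	// decide whether it is already compressed. WithCompressedCaching
	// memoizes the compressed bytes to avoid repeated gzips on upload.
	layer, err := tarball.LayerFromFile("layer.tar.gz", tarball.WithCompressedCaching)
	if err != nil {
		panic(err)
	}

	digest, err := layer.Digest()
	if err != nil {
		panic(err)
	}
	fmt.Println("digest:", digest)
}
```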
- -package tarball - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "os" - "sync" - - "github.com/containerd/stargz-snapshotter/estargz" - "github.com/google/go-containerregistry/internal/and" - comp "github.com/google/go-containerregistry/internal/compression" - gestargz "github.com/google/go-containerregistry/internal/estargz" - ggzip "github.com/google/go-containerregistry/internal/gzip" - "github.com/google/go-containerregistry/internal/zstd" - "github.com/google/go-containerregistry/pkg/compression" - "github.com/google/go-containerregistry/pkg/logs" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -type layer struct { - digest v1.Hash - diffID v1.Hash - size int64 - compressedopener Opener - uncompressedopener Opener - compression compression.Compression - compressionLevel int - annotations map[string]string - estgzopts []estargz.Option - mediaType types.MediaType -} - -// Descriptor implements partial.withDescriptor. -func (l *layer) Descriptor() (*v1.Descriptor, error) { - digest, err := l.Digest() - if err != nil { - return nil, err - } - return &v1.Descriptor{ - Size: l.size, - Digest: digest, - Annotations: l.annotations, - MediaType: l.mediaType, - }, nil -} - -// Digest implements v1.Layer -func (l *layer) Digest() (v1.Hash, error) { - return l.digest, nil -} - -// DiffID implements v1.Layer -func (l *layer) DiffID() (v1.Hash, error) { - return l.diffID, nil -} - -// Compressed implements v1.Layer -func (l *layer) Compressed() (io.ReadCloser, error) { - return l.compressedopener() -} - -// Uncompressed implements v1.Layer -func (l *layer) Uncompressed() (io.ReadCloser, error) { - return l.uncompressedopener() -} - -// Size implements v1.Layer -func (l *layer) Size() (int64, error) { - return l.size, nil -} - -// MediaType implements v1.Layer -func (l *layer) MediaType() (types.MediaType, error) { - return l.mediaType, nil -} - -// LayerOption applies options to layer -type LayerOption func(*layer) - -// WithCompression is a functional option for overriding the default -// compression algorithm used for compressing uncompressed tarballs. -// Please note that WithCompression(compression.ZStd) should be used -// in conjunction with WithMediaType(types.OCILayerZStd) -func WithCompression(comp compression.Compression) LayerOption { - return func(l *layer) { - switch comp { - case compression.ZStd: - l.compression = compression.ZStd - case compression.GZip: - l.compression = compression.GZip - case compression.None: - logs.Warn.Printf("Compression type 'none' is not supported for tarball layers; using gzip compression.") - l.compression = compression.GZip - default: - logs.Warn.Printf("Unexpected compression type for WithCompression(): %s; using gzip compression instead.", comp) - l.compression = compression.GZip - } - } -} - -// WithCompressionLevel is a functional option for overriding the default -// compression level used for compressing uncompressed tarballs. -func WithCompressionLevel(level int) LayerOption { - return func(l *layer) { - l.compressionLevel = level - } -} - -// WithMediaType is a functional option for overriding the layer's media type. 
-func WithMediaType(mt types.MediaType) LayerOption { - return func(l *layer) { - l.mediaType = mt - } -} - -// WithCompressedCaching is a functional option that overrides the -// logic for accessing the compressed bytes to memoize the result -// and avoid expensive repeated gzips. -func WithCompressedCaching(l *layer) { - var once sync.Once - var err error - - buf := bytes.NewBuffer(nil) - og := l.compressedopener - - l.compressedopener = func() (io.ReadCloser, error) { - once.Do(func() { - var rc io.ReadCloser - rc, err = og() - if err == nil { - defer rc.Close() - _, err = io.Copy(buf, rc) - } - }) - if err != nil { - return nil, err - } - - return io.NopCloser(bytes.NewBuffer(buf.Bytes())), nil - } -} - -// WithEstargzOptions is a functional option that allow the caller to pass -// through estargz.Options to the underlying compression layer. This is -// only meaningful when estargz is enabled. -// -// Deprecated: WithEstargz is deprecated, and will be removed in a future release. -func WithEstargzOptions(opts ...estargz.Option) LayerOption { - return func(l *layer) { - l.estgzopts = opts - } -} - -// WithEstargz is a functional option that explicitly enables estargz support. -// -// Deprecated: WithEstargz is deprecated, and will be removed in a future release. -func WithEstargz(l *layer) { - oguncompressed := l.uncompressedopener - estargz := func() (io.ReadCloser, error) { - crc, err := oguncompressed() - if err != nil { - return nil, err - } - eopts := append(l.estgzopts, estargz.WithCompressionLevel(l.compressionLevel)) - rc, h, err := gestargz.ReadCloser(crc, eopts...) - if err != nil { - return nil, err - } - l.annotations[estargz.TOCJSONDigestAnnotation] = h.String() - return &and.ReadCloser{ - Reader: rc, - CloseFunc: func() error { - err := rc.Close() - if err != nil { - return err - } - // As an optimization, leverage the DiffID exposed by the estargz ReadCloser - l.diffID, err = v1.NewHash(rc.DiffID().String()) - return err - }, - }, nil - } - uncompressed := func() (io.ReadCloser, error) { - urc, err := estargz() - if err != nil { - return nil, err - } - return ggzip.UnzipReadCloser(urc) - } - - l.compressedopener = estargz - l.uncompressedopener = uncompressed -} - -// LayerFromFile returns a v1.Layer given a tarball -func LayerFromFile(path string, opts ...LayerOption) (v1.Layer, error) { - opener := func() (io.ReadCloser, error) { - return os.Open(path) - } - return LayerFromOpener(opener, opts...) -} - -// LayerFromOpener returns a v1.Layer given an Opener function. -// The Opener may return either an uncompressed tarball (common), -// or a compressed tarball (uncommon). -// -// When using this in conjunction with something like remote.Write -// the uncompressed path may end up gzipping things multiple times: -// 1. Compute the layer SHA256 -// 2. Upload the compressed layer. -// -// Since gzip can be expensive, we support an option to memoize the -// compression that can be passed here: tarball.WithCompressedCaching -func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) { - comp, err := comp.GetCompression(opener) - if err != nil { - return nil, err - } - - layer := &layer{ - compression: compression.GZip, - compressionLevel: gzip.BestSpeed, - annotations: make(map[string]string, 1), - mediaType: types.DockerLayer, - } - - if estgz := os.Getenv("GGCR_EXPERIMENT_ESTARGZ"); estgz == "1" { - logs.Warn.Println("GGCR_EXPERIMENT_ESTARGZ is deprecated, and will be removed in a future release.") - opts = append([]LayerOption{WithEstargz}, opts...) 
- } - - switch comp { - case compression.GZip: - layer.compressedopener = opener - layer.uncompressedopener = func() (io.ReadCloser, error) { - urc, err := opener() - if err != nil { - return nil, err - } - return ggzip.UnzipReadCloser(urc) - } - case compression.ZStd: - layer.compressedopener = opener - layer.uncompressedopener = func() (io.ReadCloser, error) { - urc, err := opener() - if err != nil { - return nil, err - } - return zstd.UnzipReadCloser(urc) - } - default: - layer.uncompressedopener = opener - layer.compressedopener = func() (io.ReadCloser, error) { - crc, err := opener() - if err != nil { - return nil, err - } - - if layer.compression == compression.ZStd { - return zstd.ReadCloserLevel(crc, layer.compressionLevel), nil - } - - return ggzip.ReadCloserLevel(crc, layer.compressionLevel), nil - } - } - - for _, opt := range opts { - opt(layer) - } - - // Warn if media type does not match compression - var mediaTypeMismatch = false - switch layer.compression { - case compression.GZip: - mediaTypeMismatch = - layer.mediaType != types.OCILayer && - layer.mediaType != types.OCIRestrictedLayer && - layer.mediaType != types.DockerLayer - - case compression.ZStd: - mediaTypeMismatch = layer.mediaType != types.OCILayerZStd - } - - if mediaTypeMismatch { - logs.Warn.Printf("Unexpected mediaType (%s) for selected compression in %s in LayerFromOpener().", layer.mediaType, layer.compression) - } - - if layer.digest, layer.size, err = computeDigest(layer.compressedopener); err != nil { - return nil, err - } - - empty := v1.Hash{} - if layer.diffID == empty { - if layer.diffID, err = computeDiffID(layer.uncompressedopener); err != nil { - return nil, err - } - } - - return layer, nil -} - -// LayerFromReader returns a v1.Layer given a io.Reader. -// -// The reader's contents are read and buffered to a temp file in the process. -// -// Deprecated: Use LayerFromOpener or stream.NewLayer instead, if possible. -func LayerFromReader(reader io.Reader, opts ...LayerOption) (v1.Layer, error) { - tmp, err := os.CreateTemp("", "") - if err != nil { - return nil, fmt.Errorf("creating temp file to buffer reader: %w", err) - } - if _, err := io.Copy(tmp, reader); err != nil { - return nil, fmt.Errorf("writing temp file to buffer reader: %w", err) - } - return LayerFromFile(tmp.Name(), opts...) -} - -func computeDigest(opener Opener) (v1.Hash, int64, error) { - rc, err := opener() - if err != nil { - return v1.Hash{}, 0, err - } - defer rc.Close() - - return v1.SHA256(rc) -} - -func computeDiffID(opener Opener) (v1.Hash, error) { - rc, err := opener() - if err != nil { - return v1.Hash{}, err - } - defer rc.Close() - - digest, _, err := v1.SHA256(rc) - return digest, err -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go deleted file mode 100644 index e607df164a..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tarball - -import ( - "archive/tar" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "sort" - "strings" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" -) - -// WriteToFile writes in the compressed format to a tarball, on disk. -// This is just syntactic sugar wrapping tarball.Write with a new file. -func WriteToFile(p string, ref name.Reference, img v1.Image, opts ...WriteOption) error { - w, err := os.Create(p) - if err != nil { - return err - } - defer w.Close() - - return Write(ref, img, w, opts...) -} - -// MultiWriteToFile writes in the compressed format to a tarball, on disk. -// This is just syntactic sugar wrapping tarball.MultiWrite with a new file. -func MultiWriteToFile(p string, tagToImage map[name.Tag]v1.Image, opts ...WriteOption) error { - refToImage := make(map[name.Reference]v1.Image, len(tagToImage)) - for i, d := range tagToImage { - refToImage[i] = d - } - return MultiRefWriteToFile(p, refToImage, opts...) -} - -// MultiRefWriteToFile writes in the compressed format to a tarball, on disk. -// This is just syntactic sugar wrapping tarball.MultiRefWrite with a new file. -func MultiRefWriteToFile(p string, refToImage map[name.Reference]v1.Image, opts ...WriteOption) error { - w, err := os.Create(p) - if err != nil { - return err - } - defer w.Close() - - return MultiRefWrite(refToImage, w, opts...) -} - -// Write is a wrapper to write a single image and tag to a tarball. -func Write(ref name.Reference, img v1.Image, w io.Writer, opts ...WriteOption) error { - return MultiRefWrite(map[name.Reference]v1.Image{ref: img}, w, opts...) -} - -// MultiWrite writes the contents of each image to the provided writer, in the compressed format. -// The contents are written in the following format: -// One manifest.json file at the top level containing information about several images. -// One file for each layer, named after the layer's SHA. -// One file for the config blob, named after its SHA. -func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer, opts ...WriteOption) error { - refToImage := make(map[name.Reference]v1.Image, len(tagToImage)) - for i, d := range tagToImage { - refToImage[i] = d - } - return MultiRefWrite(refToImage, w, opts...) -} - -// MultiRefWrite writes the contents of each image to the provided writer, in the compressed format. -// The contents are written in the following format: -// One manifest.json file at the top level containing information about several images. -// One file for each layer, named after the layer's SHA. -// One file for the config blob, named after its SHA. 
-func MultiRefWrite(refToImage map[name.Reference]v1.Image, w io.Writer, opts ...WriteOption) error { - // process options - o := &writeOptions{ - updates: nil, - } - for _, option := range opts { - if err := option(o); err != nil { - return err - } - } - - imageToTags := dedupRefToImage(refToImage) - size, mBytes, err := getSizeAndManifest(imageToTags) - if err != nil { - return sendUpdateReturn(o, err) - } - - return writeImagesToTar(imageToTags, mBytes, size, w, o) -} - -// sendUpdateReturn return the passed in error message, also sending on update channel, if it exists -func sendUpdateReturn(o *writeOptions, err error) error { - if o != nil && o.updates != nil { - o.updates <- v1.Update{ - Error: err, - } - } - return err -} - -// sendProgressWriterReturn return the passed in error message, also sending on update channel, if it exists, along with downloaded information -func sendProgressWriterReturn(pw *progressWriter, err error) error { - if pw != nil { - return pw.Error(err) - } - return err -} - -// writeImagesToTar writes the images to the tarball -func writeImagesToTar(imageToTags map[v1.Image][]string, m []byte, size int64, w io.Writer, o *writeOptions) (err error) { - if w == nil { - return sendUpdateReturn(o, errors.New("must pass valid writer")) - } - - tw := w - var pw *progressWriter - - // we only calculate the sizes and use a progressWriter if we were provided - // an option with a progress channel - if o != nil && o.updates != nil { - pw = &progressWriter{ - w: w, - updates: o.updates, - size: size, - } - tw = pw - } - - tf := tar.NewWriter(tw) - defer tf.Close() - - seenLayerDigests := make(map[string]struct{}) - - for img := range imageToTags { - // Write the config. - cfgName, err := img.ConfigName() - if err != nil { - return sendProgressWriterReturn(pw, err) - } - cfgBlob, err := img.RawConfigFile() - if err != nil { - return sendProgressWriterReturn(pw, err) - } - if err := writeTarEntry(tf, cfgName.String(), bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil { - return sendProgressWriterReturn(pw, err) - } - - // Write the layers. - layers, err := img.Layers() - if err != nil { - return sendProgressWriterReturn(pw, err) - } - layerFiles := make([]string, len(layers)) - for i, l := range layers { - d, err := l.Digest() - if err != nil { - return sendProgressWriterReturn(pw, err) - } - // Munge the file name to appease ancient technology. - // - // tar assumes anything with a colon is a remote tape drive: - // https://www.gnu.org/software/tar/manual/html_section/tar_45.html - // Drop the algorithm prefix, e.g. 
"sha256:" - hex := d.Hex - - // gunzip expects certain file extensions: - // https://www.gnu.org/software/gzip/manual/html_node/Overview.html - layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) - - if _, ok := seenLayerDigests[hex]; ok { - continue - } - seenLayerDigests[hex] = struct{}{} - - r, err := l.Compressed() - if err != nil { - return sendProgressWriterReturn(pw, err) - } - blobSize, err := l.Size() - if err != nil { - return sendProgressWriterReturn(pw, err) - } - - if err := writeTarEntry(tf, layerFiles[i], r, blobSize); err != nil { - return sendProgressWriterReturn(pw, err) - } - } - } - if err := writeTarEntry(tf, "manifest.json", bytes.NewReader(m), int64(len(m))); err != nil { - return sendProgressWriterReturn(pw, err) - } - - // be sure to close the tar writer so everything is flushed out before we send our EOF - if err := tf.Close(); err != nil { - return sendProgressWriterReturn(pw, err) - } - // send an EOF to indicate finished on the channel, but nil as our return error - _ = sendProgressWriterReturn(pw, io.EOF) - return nil -} - -// calculateManifest calculates the manifest and optionally the size of the tar file -func calculateManifest(imageToTags map[v1.Image][]string) (m Manifest, err error) { - if len(imageToTags) == 0 { - return nil, errors.New("set of images is empty") - } - - for img, tags := range imageToTags { - cfgName, err := img.ConfigName() - if err != nil { - return nil, err - } - - // Store foreign layer info. - layerSources := make(map[v1.Hash]v1.Descriptor) - - // Write the layers. - layers, err := img.Layers() - if err != nil { - return nil, err - } - layerFiles := make([]string, len(layers)) - for i, l := range layers { - d, err := l.Digest() - if err != nil { - return nil, err - } - // Munge the file name to appease ancient technology. - // - // tar assumes anything with a colon is a remote tape drive: - // https://www.gnu.org/software/tar/manual/html_section/tar_45.html - // Drop the algorithm prefix, e.g. "sha256:" - hex := d.Hex - - // gunzip expects certain file extensions: - // https://www.gnu.org/software/gzip/manual/html_node/Overview.html - layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) - - // Add to LayerSources if it's a foreign layer. - desc, err := partial.BlobDescriptor(img, d) - if err != nil { - return nil, err - } - if !desc.MediaType.IsDistributable() { - diffid, err := partial.BlobToDiffID(img, d) - if err != nil { - return nil, err - } - layerSources[diffid] = *desc - } - } - - // Generate the tar descriptor and write it. - m = append(m, Descriptor{ - Config: cfgName.String(), - RepoTags: tags, - Layers: layerFiles, - LayerSources: layerSources, - }) - } - // sort by name of the repotags so it is consistent. 
Alternatively, we could sort by hash of the - // descriptor, but that would make it hard for humans to process - sort.Slice(m, func(i, j int) bool { - return strings.Join(m[i].RepoTags, ",") < strings.Join(m[j].RepoTags, ",") - }) - - return m, nil -} - -// CalculateSize calculates the expected complete size of the output tar file -func CalculateSize(refToImage map[name.Reference]v1.Image) (size int64, err error) { - imageToTags := dedupRefToImage(refToImage) - size, _, err = getSizeAndManifest(imageToTags) - return size, err -} - -func getSizeAndManifest(imageToTags map[v1.Image][]string) (int64, []byte, error) { - m, err := calculateManifest(imageToTags) - if err != nil { - return 0, nil, fmt.Errorf("unable to calculate manifest: %w", err) - } - mBytes, err := json.Marshal(m) - if err != nil { - return 0, nil, fmt.Errorf("could not marshall manifest to bytes: %w", err) - } - - size, err := calculateTarballSize(imageToTags, mBytes) - if err != nil { - return 0, nil, fmt.Errorf("error calculating tarball size: %w", err) - } - return size, mBytes, nil -} - -// calculateTarballSize calculates the size of the tar file -func calculateTarballSize(imageToTags map[v1.Image][]string, mBytes []byte) (size int64, err error) { - seenLayerDigests := make(map[string]struct{}) - for img, name := range imageToTags { - manifest, err := img.Manifest() - if err != nil { - return size, fmt.Errorf("unable to get manifest for img %s: %w", name, err) - } - size += calculateSingleFileInTarSize(manifest.Config.Size) - for _, l := range manifest.Layers { - hex := l.Digest.Hex - if _, ok := seenLayerDigests[hex]; ok { - continue - } - seenLayerDigests[hex] = struct{}{} - size += calculateSingleFileInTarSize(l.Size) - } - } - // add the manifest - size += calculateSingleFileInTarSize(int64(len(mBytes))) - - // add the two padding blocks that indicate end of a tar file - size += 1024 - return size, nil -} - -func dedupRefToImage(refToImage map[name.Reference]v1.Image) map[v1.Image][]string { - imageToTags := make(map[v1.Image][]string) - - for ref, img := range refToImage { - if tag, ok := ref.(name.Tag); ok { - if tags, ok := imageToTags[img]; !ok || tags == nil { - imageToTags[img] = []string{} - } - // Docker cannot load tarballs without an explicit tag: - // https://github.com/google/go-containerregistry/issues/890 - // - // We can't use the fully qualified tag.Name() because of rules_docker: - // https://github.com/google/go-containerregistry/issues/527 - // - // If the tag is "latest", but tag.String() doesn't end in ":latest", - // just append it. Kind of gross, but should work for now. 
- ts := tag.String() - if tag.Identifier() == name.DefaultTag && !strings.HasSuffix(ts, ":"+name.DefaultTag) { - ts = fmt.Sprintf("%s:%s", ts, name.DefaultTag) - } - imageToTags[img] = append(imageToTags[img], ts) - } else if _, ok := imageToTags[img]; !ok { - imageToTags[img] = nil - } - } - - return imageToTags -} - -// writeTarEntry writes a file to the provided writer with a corresponding tar header -func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { - hdr := &tar.Header{ - Mode: 0644, - Typeflag: tar.TypeReg, - Size: size, - Name: path, - } - if err := tf.WriteHeader(hdr); err != nil { - return err - } - _, err := io.Copy(tf, r) - return err -} - -// ComputeManifest get the manifest.json that will be written to the tarball -// for multiple references -func ComputeManifest(refToImage map[name.Reference]v1.Image) (Manifest, error) { - imageToTags := dedupRefToImage(refToImage) - return calculateManifest(imageToTags) -} - -// WriteOption a function option to pass to Write() -type WriteOption func(*writeOptions) error -type writeOptions struct { - updates chan<- v1.Update -} - -// WithProgress create a WriteOption for passing to Write() that enables -// a channel to receive updates as they are downloaded and written to disk. -func WithProgress(updates chan<- v1.Update) WriteOption { - return func(o *writeOptions) error { - o.updates = updates - return nil - } -} - -// progressWriter is a writer which will send the download progress -type progressWriter struct { - w io.Writer - updates chan<- v1.Update - size, complete int64 -} - -func (pw *progressWriter) Write(p []byte) (int, error) { - n, err := pw.w.Write(p) - if err != nil { - return n, err - } - - pw.complete += int64(n) - - pw.updates <- v1.Update{ - Total: pw.size, - Complete: pw.complete, - } - - return n, err -} - -func (pw *progressWriter) Error(err error) error { - pw.updates <- v1.Update{ - Total: pw.size, - Complete: pw.complete, - Error: err, - } - return err -} - -func (pw *progressWriter) Close() error { - pw.updates <- v1.Update{ - Total: pw.size, - Complete: pw.complete, - Error: io.EOF, - } - return io.EOF -} - -// calculateSingleFileInTarSize calculate the size a file will take up in a tar archive, -// given the input data. Provided by rounding up to nearest whole block (512) -// and adding header 512 -func calculateSingleFileInTarSize(in int64) (out int64) { - // doing this manually, because math.Round() works with float64 - out += in - if remainder := out % 512; remainder != 0 { - out += (512 - remainder) - } - out += 512 - return out -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go deleted file mode 100644 index c86657d7b8..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
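The write path removed above (Write, MultiRefWrite, WithProgress, progressWriter) is driven from the caller's side roughly as follows; a minimal sketch assuming the upstream module path, with the helper name, channel size, and output path invented for illustration. Per the deleted code, Write sends a final v1.Update carrying io.EOF before returning, so the caller may close the channel afterwards:

package progress

import (
	"fmt"
	"os"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/tarball"
)

// WriteWithProgress is a hypothetical wrapper around the deleted Write API.
func WriteWithProgress(ref name.Reference, img v1.Image, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// Write never closes the channel; it sends v1.Update values as bytes
	// are written and finishes with one whose Error is io.EOF.
	updates := make(chan v1.Update, 16)
	go func() {
		for u := range updates {
			if u.Error != nil {
				return // io.EOF on success; real failures also surface from Write itself
			}
			fmt.Printf("wrote %d/%d bytes\n", u.Complete, u.Total)
		}
	}()

	err = tarball.Write(ref, img, f, tarball.WithProgress(updates))
	close(updates) // safe: the final update was sent before Write returned
	return err
}

The size pre-computation that feeds those updates (calculateSingleFileInTarSize above) rounds each file up to whole 512-byte tar blocks and adds a 512-byte header; for example, a 700-byte blob accounts for 1024 + 512 = 1536 bytes of the reported total.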
- -// Package types holds common OCI media types. -package types - -// MediaType is an enumeration of the supported mime types that an element of an image might have. -type MediaType string - -// The collection of known MediaType values. -const ( - OCIContentDescriptor MediaType = "application/vnd.oci.descriptor.v1+json" - OCIImageIndex MediaType = "application/vnd.oci.image.index.v1+json" - OCIManifestSchema1 MediaType = "application/vnd.oci.image.manifest.v1+json" - OCIConfigJSON MediaType = "application/vnd.oci.image.config.v1+json" - OCILayer MediaType = "application/vnd.oci.image.layer.v1.tar+gzip" - OCILayerZStd MediaType = "application/vnd.oci.image.layer.v1.tar+zstd" - OCIRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" - OCIUncompressedLayer MediaType = "application/vnd.oci.image.layer.v1.tar" - OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar" - - DockerManifestSchema1 MediaType = "application/vnd.docker.distribution.manifest.v1+json" - DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" - DockerManifestSchema2 MediaType = "application/vnd.docker.distribution.manifest.v2+json" - DockerManifestList MediaType = "application/vnd.docker.distribution.manifest.list.v2+json" - DockerLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" - DockerConfigJSON MediaType = "application/vnd.docker.container.image.v1+json" - DockerPluginConfig MediaType = "application/vnd.docker.plugin.v1+json" - DockerForeignLayer MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" - DockerUncompressedLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar" - - OCIVendorPrefix = "vnd.oci" - DockerVendorPrefix = "vnd.docker" -) - -// IsDistributable returns true if a layer is distributable, see: -// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers -func (m MediaType) IsDistributable() bool { - switch m { - case DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer: - return false - } - return true -} - -// IsImage returns true if the mediaType represents an image manifest, as opposed to something else, like an index. -func (m MediaType) IsImage() bool { - switch m { - case OCIManifestSchema1, DockerManifestSchema2: - return true - } - return false -} - -// IsIndex returns true if the mediaType represents an index, as opposed to something else, like an image. -func (m MediaType) IsIndex() bool { - switch m { - case OCIImageIndex, DockerManifestList: - return true - } - return false -} - -// IsConfig returns true if the mediaType represents a config, as opposed to something else, like an image. 
-func (m MediaType) IsConfig() bool { - switch m { - case OCIConfigJSON, DockerConfigJSON: - return true - } - return false -} - -func (m MediaType) IsSchema1() bool { - switch m { - case DockerManifestSchema1, DockerManifestSchema1Signed: - return true - } - return false -} - -func (m MediaType) IsLayer() bool { - switch m { - case DockerLayer, DockerUncompressedLayer, OCILayer, OCILayerZStd, OCIUncompressedLayer, DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer: - return true - } - return false -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/validate/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/validate/doc.go deleted file mode 100644 index 91ca87a5fb..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/validate/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package validate provides methods for validating image correctness. -package validate diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/validate/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/validate/image.go deleted file mode 100644 index 94fb767bbc..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/validate/image.go +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package validate - -import ( - "bytes" - "errors" - "fmt" - "io" - "strings" - - "github.com/google/go-cmp/cmp" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" -) - -// Image validates that img does not violate any invariants of the image format. 
-func Image(img v1.Image, opt ...Option) error { - errs := []string{} - if err := validateLayers(img, opt...); err != nil { - errs = append(errs, fmt.Sprintf("validating layers: %v", err)) - } - - if err := validateConfig(img); err != nil { - errs = append(errs, fmt.Sprintf("validating config: %v", err)) - } - - if err := validateManifest(img); err != nil { - errs = append(errs, fmt.Sprintf("validating manifest: %v", err)) - } - - if len(errs) != 0 { - return errors.New(strings.Join(errs, "\n\n")) - } - return nil -} - -func validateConfig(img v1.Image) error { - cn, err := img.ConfigName() - if err != nil { - return err - } - - rc, err := img.RawConfigFile() - if err != nil { - return err - } - - hash, size, err := v1.SHA256(bytes.NewReader(rc)) - if err != nil { - return err - } - - m, err := img.Manifest() - if err != nil { - return err - } - - cf, err := img.ConfigFile() - if err != nil { - return err - } - - pcf, err := v1.ParseConfigFile(bytes.NewReader(rc)) - if err != nil { - return err - } - - errs := []string{} - if cn != hash { - errs = append(errs, fmt.Sprintf("mismatched config digest: ConfigName()=%s, SHA256(RawConfigFile())=%s", cn, hash)) - } - - if want, got := m.Config.Size, size; want != got { - errs = append(errs, fmt.Sprintf("mismatched config size: Manifest.Config.Size()=%d, len(RawConfigFile())=%d", want, got)) - } - - if diff := cmp.Diff(pcf, cf); diff != "" { - errs = append(errs, fmt.Sprintf("mismatched config content: (-ParseConfigFile(RawConfigFile()) +ConfigFile()) %s", diff)) - } - - if cf.RootFS.Type != "layers" { - errs = append(errs, fmt.Sprintf("invalid ConfigFile.RootFS.Type: %q != %q", cf.RootFS.Type, "layers")) - } - - if len(errs) != 0 { - return errors.New(strings.Join(errs, "\n")) - } - - return nil -} - -func validateLayers(img v1.Image, opt ...Option) error { - o := makeOptions(opt...) - - layers, err := img.Layers() - if err != nil { - return err - } - - if o.fast { - return layersExist(layers) - } - - digests := []v1.Hash{} - diffids := []v1.Hash{} - udiffids := []v1.Hash{} - sizes := []int64{} - for i, layer := range layers { - cl, err := computeLayer(layer) - if errors.Is(err, io.ErrUnexpectedEOF) { - // Errored while reading tar content of layer because a header or - // content section was not the correct length. This is most likely - // due to an incomplete download or otherwise interrupted process. - m, err := img.Manifest() - if err != nil { - return fmt.Errorf("undersized layer[%d] content", i) - } - return fmt.Errorf("undersized layer[%d] content: Manifest.Layers[%d].Size=%d", i, i, m.Layers[i].Size) - } - if err != nil { - return err - } - // Compute all of these first before we call Config() and Manifest() to allow - // for lazy access e.g. for stream.Layer. 
- digests = append(digests, cl.digest) - diffids = append(diffids, cl.diffid) - udiffids = append(udiffids, cl.uncompressedDiffid) - sizes = append(sizes, cl.size) - } - - cf, err := img.ConfigFile() - if err != nil { - return err - } - - m, err := img.Manifest() - if err != nil { - return err - } - - errs := []string{} - for i, layer := range layers { - digest, err := layer.Digest() - if err != nil { - return err - } - diffid, err := layer.DiffID() - if err != nil { - return err - } - size, err := layer.Size() - if err != nil { - return err - } - mediaType, err := layer.MediaType() - if err != nil { - return err - } - - if _, err := img.LayerByDigest(digest); err != nil { - return err - } - - if _, err := img.LayerByDiffID(diffid); err != nil { - return err - } - - if digest != digests[i] { - errs = append(errs, fmt.Sprintf("mismatched layer[%d] digest: Digest()=%s, SHA256(Compressed())=%s", i, digest, digests[i])) - } - - if m.Layers[i].Digest != digests[i] { - errs = append(errs, fmt.Sprintf("mismatched layer[%d] digest: Manifest.Layers[%d].Digest=%s, SHA256(Compressed())=%s", i, i, m.Layers[i].Digest, digests[i])) - } - - if diffid != diffids[i] { - errs = append(errs, fmt.Sprintf("mismatched layer[%d] diffid: DiffID()=%s, SHA256(Gunzip(Compressed()))=%s", i, diffid, diffids[i])) - } - - if diffid != udiffids[i] { - errs = append(errs, fmt.Sprintf("mismatched layer[%d] diffid: DiffID()=%s, SHA256(Uncompressed())=%s", i, diffid, udiffids[i])) - } - - if cf.RootFS.DiffIDs[i] != diffids[i] { - errs = append(errs, fmt.Sprintf("mismatched layer[%d] diffid: ConfigFile.RootFS.DiffIDs[%d]=%s, SHA256(Gunzip(Compressed()))=%s", i, i, cf.RootFS.DiffIDs[i], diffids[i])) - } - - if size != sizes[i] { - errs = append(errs, fmt.Sprintf("mismatched layer[%d] size: Size()=%d, len(Compressed())=%d", i, size, sizes[i])) - } - - if m.Layers[i].Size != sizes[i] { - errs = append(errs, fmt.Sprintf("mismatched layer[%d] size: Manifest.Layers[%d].Size=%d, len(Compressed())=%d", i, i, m.Layers[i].Size, sizes[i])) - } - - if m.Layers[i].MediaType != mediaType { - errs = append(errs, fmt.Sprintf("mismatched layer[%d] mediaType: Manifest.Layers[%d].MediaType=%s, layer.MediaType()=%s", i, i, m.Layers[i].MediaType, mediaType)) - } - } - if len(errs) != 0 { - return errors.New(strings.Join(errs, "\n")) - } - - return nil -} - -func validateManifest(img v1.Image) error { - digest, err := img.Digest() - if err != nil { - return err - } - - size, err := img.Size() - if err != nil { - return err - } - - rm, err := img.RawManifest() - if err != nil { - return err - } - - hash, _, err := v1.SHA256(bytes.NewReader(rm)) - if err != nil { - return err - } - - m, err := img.Manifest() - if err != nil { - return err - } - - pm, err := v1.ParseManifest(bytes.NewReader(rm)) - if err != nil { - return err - } - - errs := []string{} - if digest != hash { - errs = append(errs, fmt.Sprintf("mismatched manifest digest: Digest()=%s, SHA256(RawManifest())=%s", digest, hash)) - } - - if diff := cmp.Diff(pm, m); diff != "" { - errs = append(errs, fmt.Sprintf("mismatched manifest content: (-ParseManifest(RawManifest()) +Manifest()) %s", diff)) - } - - if size != int64(len(rm)) { - errs = append(errs, fmt.Sprintf("mismatched manifest size: Size()=%d, len(RawManifest())=%d", size, len(rm))) - } - - if len(errs) != 0 { - return errors.New(strings.Join(errs, "\n")) - } - - return nil -} - -func layersExist(layers []v1.Layer) error { - errs := []string{} - for _, layer := range layers { - ok, err := partial.Exists(layer) - if err != nil { - 
errs = append(errs, err.Error()) - } - if !ok { - errs = append(errs, "layer does not exist") - } - } - - if len(errs) != 0 { - return errors.New(strings.Join(errs, "\n")) - } - - return nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/validate/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/validate/index.go deleted file mode 100644 index 4d833ae672..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/validate/index.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package validate - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-containerregistry/pkg/logs" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Index validates that idx does not violate any invariants of the index format. -func Index(idx v1.ImageIndex, opt ...Option) error { - errs := []string{} - - if err := validateChildren(idx, opt...); err != nil { - errs = append(errs, fmt.Sprintf("validating children: %v", err)) - } - - if err := validateIndexManifest(idx); err != nil { - errs = append(errs, fmt.Sprintf("validating index manifest: %v", err)) - } - - if len(errs) != 0 { - return errors.New(strings.Join(errs, "\n\n")) - } - return nil -} - -type withLayer interface { - Layer(v1.Hash) (v1.Layer, error) -} - -func validateChildren(idx v1.ImageIndex, opt ...Option) error { - manifest, err := idx.IndexManifest() - if err != nil { - return err - } - - errs := []string{} - for i, desc := range manifest.Manifests { - switch desc.MediaType { - case types.OCIImageIndex, types.DockerManifestList: - idx, err := idx.ImageIndex(desc.Digest) - if err != nil { - return err - } - if err := Index(idx, opt...); err != nil { - errs = append(errs, fmt.Sprintf("failed to validate index Manifests[%d](%s): %v", i, desc.Digest, err)) - } - if err := validateMediaType(idx, desc.MediaType); err != nil { - errs = append(errs, fmt.Sprintf("failed to validate index MediaType[%d](%s): %v", i, desc.Digest, err)) - } - case types.OCIManifestSchema1, types.DockerManifestSchema2: - img, err := idx.Image(desc.Digest) - if err != nil { - return err - } - if err := Image(img, opt...); err != nil { - errs = append(errs, fmt.Sprintf("failed to validate image Manifests[%d](%s): %v", i, desc.Digest, err)) - } - if err := validateMediaType(img, desc.MediaType); err != nil { - errs = append(errs, fmt.Sprintf("failed to validate image MediaType[%d](%s): %v", i, desc.Digest, err)) - } - if err := validatePlatform(img, desc.Platform); err != nil { - errs = append(errs, fmt.Sprintf("failed to validate image platform[%d](%s): %v", i, desc.Digest, err)) - } - default: - // Workaround for #819. 
- if wl, ok := idx.(withLayer); ok { - layer, err := wl.Layer(desc.Digest) - if err != nil { - return fmt.Errorf("failed to get layer Manifests[%d]: %w", i, err) - } - if err := Layer(layer, opt...); err != nil { - lerr := fmt.Sprintf("failed to validate layer Manifests[%d](%s): %v", i, desc.Digest, err) - if desc.MediaType.IsDistributable() { - errs = append(errs, lerr) - } else { - logs.Warn.Printf("nondistributable layer failure: %v", lerr) - } - } - } else { - logs.Warn.Printf("Unexpected manifest: %s", desc.MediaType) - } - } - } - - if len(errs) != 0 { - return errors.New(strings.Join(errs, "\n")) - } - - return nil -} - -type withMediaType interface { - MediaType() (types.MediaType, error) -} - -func validateMediaType(i withMediaType, want types.MediaType) error { - got, err := i.MediaType() - if err != nil { - return err - } - if want != got { - return fmt.Errorf("mismatched mediaType: MediaType() = %v != %v", got, want) - } - - return nil -} - -func validateIndexManifest(idx v1.ImageIndex) error { - digest, err := idx.Digest() - if err != nil { - return err - } - - size, err := idx.Size() - if err != nil { - return err - } - - rm, err := idx.RawManifest() - if err != nil { - return err - } - - hash, _, err := v1.SHA256(bytes.NewReader(rm)) - if err != nil { - return err - } - - m, err := idx.IndexManifest() - if err != nil { - return err - } - - pm, err := v1.ParseIndexManifest(bytes.NewReader(rm)) - if err != nil { - return err - } - - errs := []string{} - if digest != hash { - errs = append(errs, fmt.Sprintf("mismatched manifest digest: Digest()=%s, SHA256(RawManifest())=%s", digest, hash)) - } - - if diff := cmp.Diff(pm, m); diff != "" { - errs = append(errs, fmt.Sprintf("mismatched manifest content: (-ParseIndexManifest(RawManifest()) +Manifest()) %s", diff)) - } - - if size != int64(len(rm)) { - errs = append(errs, fmt.Sprintf("mismatched manifest size: Size()=%d, len(RawManifest())=%d", size, len(rm))) - } - - if len(errs) != 0 { - return errors.New(strings.Join(errs, "\n")) - } - - return nil -} - -func validatePlatform(img v1.Image, want *v1.Platform) error { - if want == nil { - return nil - } - - cf, err := img.ConfigFile() - if err != nil { - return err - } - - got := cf.Platform() - - if got == nil { - return fmt.Errorf("config file missing platform fields") - } - - if got.Equals(*want) { - return nil - } - - errs := []string{} - - if got.OS != want.OS { - errs = append(errs, fmt.Sprintf("mismatched OS: %s != %s", got.OS, want.OS)) - } - - if got.Architecture != want.Architecture { - errs = append(errs, fmt.Sprintf("mismatched Architecture: %s != %s", got.Architecture, want.Architecture)) - } - - if got.OSVersion != want.OSVersion { - errs = append(errs, fmt.Sprintf("mismatched OSVersion: %s != %s", got.OSVersion, want.OSVersion)) - } - - if got.OSVersion != want.OSVersion { - errs = append(errs, fmt.Sprintf("mismatched OSVersion: %s != %s", got.OSVersion, want.OSVersion)) - } - - if len(errs) == 0 { - // If we got here, some features might be mismatched. Just add those... 
- if len(got.Features) != 0 || len(want.Features) != 0 { - errs = append(errs, fmt.Sprintf("mismatched Features: %v, %v", got.Features, want.Features)) - } - if len(got.OSFeatures) != 0 || len(want.OSFeatures) != 0 { - errs = append(errs, fmt.Sprintf("mismatched OSFeatures: %v, %v", got.OSFeatures, want.OSFeatures)) - } - } - - return errors.New(strings.Join(errs, "\n")) -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/validate/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/validate/layer.go deleted file mode 100644 index fdd8f38cb5..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/validate/layer.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package validate - -import ( - "archive/tar" - "compress/gzip" - "crypto" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" -) - -// Layer validates that the values return by its methods are consistent with the -// contents returned by Compressed and Uncompressed. -func Layer(layer v1.Layer, opt ...Option) error { - o := makeOptions(opt...) - if o.fast { - ok, err := partial.Exists(layer) - if err != nil { - return err - } - if !ok { - return fmt.Errorf("layer does not exist") - } - return nil - } - - cl, err := computeLayer(layer) - if err != nil { - return err - } - - errs := []string{} - - digest, err := layer.Digest() - if err != nil { - return err - } - diffid, err := layer.DiffID() - if err != nil { - return err - } - size, err := layer.Size() - if err != nil { - return err - } - - if digest != cl.digest { - errs = append(errs, fmt.Sprintf("mismatched digest: Digest()=%s, SHA256(Compressed())=%s", digest, cl.digest)) - } - - if diffid != cl.diffid { - errs = append(errs, fmt.Sprintf("mismatched diffid: DiffID()=%s, SHA256(Gunzip(Compressed()))=%s", diffid, cl.diffid)) - } - - if diffid != cl.uncompressedDiffid { - errs = append(errs, fmt.Sprintf("mismatched diffid: DiffID()=%s, SHA256(Uncompressed())=%s", diffid, cl.uncompressedDiffid)) - } - - if size != cl.size { - errs = append(errs, fmt.Sprintf("mismatched size: Size()=%d, len(Compressed())=%d", size, cl.size)) - } - - if len(errs) != 0 { - return errors.New(strings.Join(errs, "\n")) - } - - return nil -} - -type computedLayer struct { - // Calculated from Compressed stream. - digest v1.Hash - size int64 - diffid v1.Hash - - // Calculated from Uncompressed stream. - uncompressedDiffid v1.Hash - uncompressedSize int64 -} - -func computeLayer(layer v1.Layer) (*computedLayer, error) { - compressed, err := layer.Compressed() - if err != nil { - return nil, err - } - - // Keep track of compressed digest. - digester := crypto.SHA256.New() - // Everything read from compressed is written to digester to compute digest. 
- hashCompressed := io.TeeReader(compressed, digester) - - // Call io.Copy to write from the layer Reader through to the tarReader on - // the other side of the pipe. - pr, pw := io.Pipe() - var size int64 - go func() { - n, err := io.Copy(pw, hashCompressed) - if err != nil { - pw.CloseWithError(err) - return - } - size = n - - // Now close the compressed reader, to flush the gzip stream - // and calculate digest/diffID/size. This will cause pr to - // return EOF which will cause readers of the Compressed stream - // to finish reading. - pw.CloseWithError(compressed.Close()) - }() - - // Read the bytes through gzip.Reader to compute the DiffID. - uncompressed, err := gzip.NewReader(pr) - if err != nil { - return nil, err - } - diffider := crypto.SHA256.New() - hashUncompressed := io.TeeReader(uncompressed, diffider) - - // Ensure there aren't duplicate file paths. - tarReader := tar.NewReader(hashUncompressed) - files := make(map[string]struct{}) - for { - hdr, err := tarReader.Next() - if errors.Is(err, io.EOF) { - break - } - if err != nil { - return nil, err - } - if _, ok := files[hdr.Name]; ok { - return nil, fmt.Errorf("duplicate file path: %s", hdr.Name) - } - files[hdr.Name] = struct{}{} - } - - // Discard any trailing padding that the tar.Reader doesn't consume. - if _, err := io.Copy(io.Discard, hashUncompressed); err != nil { - return nil, err - } - - if err := uncompressed.Close(); err != nil { - return nil, err - } - - digest := v1.Hash{ - Algorithm: "sha256", - Hex: hex.EncodeToString(digester.Sum(make([]byte, 0, digester.Size()))), - } - - diffid := v1.Hash{ - Algorithm: "sha256", - Hex: hex.EncodeToString(diffider.Sum(make([]byte, 0, diffider.Size()))), - } - - ur, err := layer.Uncompressed() - if err != nil { - return nil, err - } - defer ur.Close() - udiffid, usize, err := v1.SHA256(ur) - if err != nil { - return nil, err - } - - return &computedLayer{ - digest: digest, - diffid: diffid, - size: size, - uncompressedDiffid: udiffid, - uncompressedSize: usize, - }, nil -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/validate/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/validate/options.go deleted file mode 100644 index a6bf2dcc2c..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/validate/options.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2021 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package validate - -// Option is a functional option for validate. -type Option func(*options) - -type options struct { - fast bool -} - -func makeOptions(opts ...Option) options { - opt := options{ - fast: false, - } - for _, o := range opts { - o(&opt) - } - return opt -} - -// Fast causes validate to skip reading and digesting layer bytes. 
-func Fast(o *options) { - o.fast = true -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go deleted file mode 100644 index a47b7475ed..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go +++ /dev/null @@ -1,339 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Config) DeepCopyInto(out *Config) { - *out = *in - if in.Cmd != nil { - in, out := &in.Cmd, &out.Cmd - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Healthcheck != nil { - in, out := &in.Healthcheck, &out.Healthcheck - *out = new(HealthConfig) - (*in).DeepCopyInto(*out) - } - if in.Entrypoint != nil { - in, out := &in.Entrypoint, &out.Entrypoint - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.OnBuild != nil { - in, out := &in.OnBuild, &out.OnBuild - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make(map[string]struct{}, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ExposedPorts != nil { - in, out := &in.ExposedPorts, &out.ExposedPorts - *out = make(map[string]struct{}, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Shell != nil { - in, out := &in.Shell, &out.Shell - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. -func (in *Config) DeepCopy() *Config { - if in == nil { - return nil - } - out := new(Config) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigFile) DeepCopyInto(out *ConfigFile) { - *out = *in - in.Created.DeepCopyInto(&out.Created) - if in.History != nil { - in, out := &in.History, &out.History - *out = make([]History, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.RootFS.DeepCopyInto(&out.RootFS) - in.Config.DeepCopyInto(&out.Config) - if in.OSFeatures != nil { - in, out := &in.OSFeatures, &out.OSFeatures - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile. 
-func (in *ConfigFile) DeepCopy() *ConfigFile { - if in == nil { - return nil - } - out := new(ConfigFile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Descriptor) DeepCopyInto(out *Descriptor) { - *out = *in - out.Digest = in.Digest - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.URLs != nil { - in, out := &in.URLs, &out.URLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Platform != nil { - in, out := &in.Platform, &out.Platform - *out = new(Platform) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor. -func (in *Descriptor) DeepCopy() *Descriptor { - if in == nil { - return nil - } - out := new(Descriptor) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Hash) DeepCopyInto(out *Hash) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash. -func (in *Hash) DeepCopy() *Hash { - if in == nil { - return nil - } - out := new(Hash) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HealthConfig) DeepCopyInto(out *HealthConfig) { - *out = *in - if in.Test != nil { - in, out := &in.Test, &out.Test - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthConfig. -func (in *HealthConfig) DeepCopy() *HealthConfig { - if in == nil { - return nil - } - out := new(HealthConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *History) DeepCopyInto(out *History) { - *out = *in - in.Created.DeepCopyInto(&out.Created) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History. -func (in *History) DeepCopy() *History { - if in == nil { - return nil - } - out := new(History) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexManifest) DeepCopyInto(out *IndexManifest) { - *out = *in - if in.Manifests != nil { - in, out := &in.Manifests, &out.Manifests - *out = make([]Descriptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Subject != nil { - in, out := &in.Subject, &out.Subject - *out = new(Descriptor) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManifest. 
-func (in *IndexManifest) DeepCopy() *IndexManifest { - if in == nil { - return nil - } - out := new(IndexManifest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Manifest) DeepCopyInto(out *Manifest) { - *out = *in - in.Config.DeepCopyInto(&out.Config) - if in.Layers != nil { - in, out := &in.Layers, &out.Layers - *out = make([]Descriptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Subject != nil { - in, out := &in.Subject, &out.Subject - *out = new(Descriptor) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest. -func (in *Manifest) DeepCopy() *Manifest { - if in == nil { - return nil - } - out := new(Manifest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Platform) DeepCopyInto(out *Platform) { - *out = *in - if in.OSFeatures != nil { - in, out := &in.OSFeatures, &out.OSFeatures - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Features != nil { - in, out := &in.Features, &out.Features - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. -func (in *Platform) DeepCopy() *Platform { - if in == nil { - return nil - } - out := new(Platform) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RootFS) DeepCopyInto(out *RootFS) { - *out = *in - if in.DiffIDs != nil { - in, out := &in.DiffIDs, &out.DiffIDs - *out = make([]Hash, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS. -func (in *RootFS) DeepCopy() *RootFS { - if in == nil { - return nil - } - out := new(RootFS) - in.DeepCopyInto(out) - return out -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. -func (in *Time) DeepCopy() *Time { - if in == nil { - return nil - } - out := new(Time) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/google/safetext/LICENSE b/vendor/github.com/google/safetext/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/google/safetext/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
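Aside on the DeepCopyInto/DeepCopy pairs deleted at the start of this hunk: they follow the standard deepcopy-gen pattern. DeepCopyInto writes into a caller-supplied, non-nil value and allocates fresh backing storage for every slice and map, so the copy shares no mutable state with the original; DeepCopy adds the nil check and the allocation. A minimal sketch of the same pattern for a hypothetical type (illustrative only, not code from this repository):

package main

import "fmt"

// Spec is a hypothetical type used only to illustrate the generated pattern.
type Spec struct {
	Name   string
	Labels map[string]string
}

// DeepCopyInto copies the receiver into out, allocating a fresh map so the
// two values share no mutable state. out must be non-nil.
func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in
	if in.Labels != nil {
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
	}
}

// DeepCopy allocates a new Spec, copies the receiver into it, and returns it.
// A nil receiver yields nil, matching the generated functions above.
func (in *Spec) DeepCopy() *Spec {
	if in == nil {
		return nil
	}
	out := new(Spec)
	in.DeepCopyInto(out)
	return out
}

func main() {
	a := &Spec{Name: "a", Labels: map[string]string{"k": "v"}}
	b := a.DeepCopy()
	b.Labels["k"] = "changed"
	fmt.Println(a.Labels["k"], b.Labels["k"]) // v changed: the copy is independent
}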
diff --git a/vendor/github.com/google/safetext/common/common.go b/vendor/github.com/google/safetext/common/common.go deleted file mode 100644 index 80a8bbd97a..0000000000 --- a/vendor/github.com/google/safetext/common/common.go +++ /dev/null @@ -1,260 +0,0 @@ -/* - * - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package common implements common functionality for dealing with text/template. -package common - -import ( - "bytes" - "reflect" - "strings" - "sync" - "text/template" - "text/template/parse" - "unicode" - "unicode/utf8" -) - -// ContainsStringsWithSpecialCharacters determines whether an object contains interface{} strings that contain special characters. -func ContainsStringsWithSpecialCharacters(data interface{}, special string) bool { - if data == nil { - return false - } - - switch reflect.TypeOf(data).Kind() { - case reflect.Ptr: - p := reflect.ValueOf(data) - return !p.IsNil() && ContainsStringsWithSpecialCharacters(p.Elem().Interface(), special) - case reflect.String: - return strings.ContainsAny(reflect.ValueOf(data).String(), special) - case reflect.Slice, reflect.Array: - for i := 0; i < reflect.ValueOf(data).Len(); i++ { - if ContainsStringsWithSpecialCharacters(reflect.ValueOf(data).Index(i).Interface(), special) { - return true - } - } - case reflect.Map: - dataIter := reflect.ValueOf(data).MapRange() - for dataIter.Next() { - if ContainsStringsWithSpecialCharacters(dataIter.Value().Interface(), special) { - return true - } - } - case reflect.Struct: - t := reflect.TypeOf(data) - v := reflect.ValueOf(data) - n := v.NumField() - for i := 0; i < n; i++ { - r, _ := utf8.DecodeRuneInString(t.Field(i).Name) - if unicode.IsUpper(r) && ContainsStringsWithSpecialCharacters(v.Field(i).Interface(), special) { - return true - } - } - } - - return false -} - -// FuncMap to register new template objects with. -var FuncMap = map[string]interface{}{ - "textTemplateRemediationFunc": textTemplateRemediationFunc, - "StructuralData": echo, -} - -func echo(in interface{}) interface{} { - return in -} - -// BaselineString is a string callback function that just returns a constant string, -// used to get a baseline of how the resultant YAML is structured. -func BaselineString(string) string { - return "baseline" -} - -// stringCallback provides the callback for how strings should be manipulated before -// being pasted into the template execution result. -var stringCallback func(string) string -var stringCallbackLock sync.Mutex - -func textTemplateRemediationFunc(data interface{}) interface{} { - return deepCopyMutateStrings(data, stringCallback) -} - -// ExecuteWithCallback performs an execution on a callback-applied template -// (WalkApplyFuncToNonDeclaractiveActions) with a specified callback. 
-func ExecuteWithCallback(tmpl *template.Template, cb func(string) string, result *bytes.Buffer, data interface{}) error { - stringCallbackLock.Lock() - defer stringCallbackLock.Unlock() - stringCallback = cb - - return tmpl.Execute(result, data) -} - -func makePointer(data interface{}) interface{} { - rtype := reflect.New(reflect.TypeOf(data)) - rtype.Elem().Set(reflect.ValueOf(data)) - return rtype.Interface() -} - -func dereference(data interface{}) interface{} { - return reflect.ValueOf(data).Elem().Interface() -} - -func deepCopyMutateStrings(data interface{}, mutateF func(string) string) interface{} { - var r interface{} - - if data == nil { - return nil - } - - switch reflect.TypeOf(data).Kind() { - case reflect.Ptr: - p := reflect.ValueOf(data) - if p.IsNil() { - r = data - } else { - c := deepCopyMutateStrings(dereference(data), mutateF) - r = makePointer(c) - - // Sometimes we accidentally introduce one too minterface{} layers of indirection (seems related to protobuf generated fields like ReleaseNamespace *ReleaseNamespace `... reflect:"unexport"`) - if reflect.TypeOf(r) != reflect.TypeOf(data) { - r = c - } - } - case reflect.String: - return mutateF(reflect.ValueOf(data).String()) - case reflect.Slice, reflect.Array: - rc := reflect.MakeSlice(reflect.TypeOf(data), reflect.ValueOf(data).Len(), reflect.ValueOf(data).Len()) - for i := 0; i < reflect.ValueOf(data).Len(); i++ { - rc.Index(i).Set(reflect.ValueOf(deepCopyMutateStrings(reflect.ValueOf(data).Index(i).Interface(), mutateF))) - } - r = rc.Interface() - case reflect.Map: - rc := reflect.MakeMap(reflect.TypeOf(data)) - dataIter := reflect.ValueOf(data).MapRange() - for dataIter.Next() { - rc.SetMapIndex(dataIter.Key(), reflect.ValueOf(deepCopyMutateStrings(dataIter.Value().Interface(), mutateF))) - } - r = rc.Interface() - case reflect.Struct: - s := reflect.New(reflect.TypeOf(data)) - - t := reflect.TypeOf(data) - v := reflect.ValueOf(data) - n := v.NumField() - for i := 0; i < n; i++ { - r, _ := utf8.DecodeRuneInString(t.Field(i).Name) - - // Don't copy unexported fields - if unicode.IsUpper(r) { - reflect.Indirect(s).Field(i).Set( - reflect.ValueOf(deepCopyMutateStrings(v.Field(i).Interface(), mutateF)), - ) - } - } - - r = s.Interface() - default: - // No other types need special handling (int, bool, etc) - r = data - } - - return r -} - -func applyPipeCmds(cmds []*parse.CommandNode) { - applyFunc := "textTemplateRemediationFunc" - - for _, c := range cmds { - newArgs := make([]parse.Node, 0) - for i, a := range c.Args { - switch a := a.(type) { - case *parse.DotNode, *parse.FieldNode, *parse.VariableNode: - if i == 0 && len(c.Args) > 1 { - // If this is the first "argument" of multiple, then it is really a function - newArgs = append(newArgs, a) - } else { - // If this node is an argument to a call to "StructuralData", then pass it through as-is - switch identifier := c.Args[0].(type) { - case *parse.IdentifierNode: - if identifier.Ident == "StructuralData" { - newArgs = append(newArgs, a) - continue - } - } - - newPipe := &parse.PipeNode{NodeType: parse.NodePipe, Decl: nil} - newPipe.Cmds = []*parse.CommandNode{ - &parse.CommandNode{NodeType: parse.NodeCommand, Args: []parse.Node{a}}, - &parse.CommandNode{NodeType: parse.NodeCommand, Args: []parse.Node{&parse.IdentifierNode{NodeType: parse.NodeIdentifier, Ident: applyFunc}}}, - } - newArgs = append(newArgs, newPipe) - } - case *parse.PipeNode: - applyPipeCmds(a.Cmds) - newArgs = append(newArgs, a) - default: - newArgs = append(newArgs, a) - } - } - - c.Args = newArgs 
- } -} - -func branchNode(node parse.Node) *parse.BranchNode { - switch node := node.(type) { - case *parse.IfNode: - return &node.BranchNode - case *parse.RangeNode: - return &node.BranchNode - case *parse.WithNode: - return &node.BranchNode - } - - return nil -} - -// WalkApplyFuncToNonDeclaractiveActions walks the AST, applying a pipeline function to interface{} "paste" nodes (non-declarative action nodes) -func WalkApplyFuncToNonDeclaractiveActions(template *template.Template, node parse.Node) { - switch node := node.(type) { - case *parse.ActionNode: - // Non-declarative actions are paste actions - if len(node.Pipe.Decl) == 0 { - applyPipeCmds(node.Pipe.Cmds) - } - - case *parse.IfNode, *parse.RangeNode, *parse.WithNode: - nodeBranch := branchNode(node) - WalkApplyFuncToNonDeclaractiveActions(template, nodeBranch.List) - if nodeBranch.ElseList != nil { - WalkApplyFuncToNonDeclaractiveActions(template, nodeBranch.ElseList) - } - case *parse.ListNode: - for _, node := range node.Nodes { - WalkApplyFuncToNonDeclaractiveActions(template, node) - } - case *parse.TemplateNode: - tmpl := template.Lookup(node.Name) - if tmpl != nil { - treeCopy := tmpl.Tree.Copy() - WalkApplyFuncToNonDeclaractiveActions(tmpl, treeCopy.Root) - template.AddParseTree(node.Name, treeCopy) - } - } -} diff --git a/vendor/github.com/google/safetext/yamltemplate/yamltemplate.go b/vendor/github.com/google/safetext/yamltemplate/yamltemplate.go deleted file mode 100644 index 47d8f28389..0000000000 --- a/vendor/github.com/google/safetext/yamltemplate/yamltemplate.go +++ /dev/null @@ -1,657 +0,0 @@ -/* - * - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package yamltemplate is a drop-in-replacement for using text/template to produce YAML, that adds automatic detection for YAML injection -package yamltemplate - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/fs" - "os" - "path" - "path/filepath" - "reflect" - "text/template" - "text/template/parse" - "unicode" - "unicode/utf8" - - "gopkg.in/yaml.v3" - - "github.com/google/safetext/common" -) - -// ErrInvalidYAMLTemplate indicates the requested template is not valid YAML. -var ErrInvalidYAMLTemplate error = errors.New("Invalid YAML Template") - -// ErrYAMLInjection indicates the inputs resulted in YAML injection. -var ErrYAMLInjection error = errors.New("YAML Injection Detected") - -// ExecError is the custom error type returned when Execute has an -// error evaluating its template. (If a write error occurs, the actual -// error is returned; it will not be of type ExecError.) -type ExecError = template.ExecError - -// FuncMap is the type of the map defining the mapping from names to functions. -// Each function must have either a single return value, or two return values of -// which the second has type error. In that case, if the second (error) -// return value evaluates to non-nil during execution, execution terminates and -// Execute returns that error. 
-// -// Errors returned by Execute wrap the underlying error; call errors.As to -// uncover them. -// -// When template execution invokes a function with an argument list, that list -// must be assignable to the function's parameter types. Functions meant to -// apply to arguments of arbitrary type can use parameters of type interface{} or -// of type reflect.Value. Similarly, functions meant to return a result of arbitrary -// type can return interface{} or reflect.Value. -type FuncMap = template.FuncMap - -// Template is the representation of a parsed template. The *parse.Tree -// field is exported only for use by html/template and should be treated -// as unexported by all other clients. -type Template struct { - unsafeTemplate *template.Template -} - -// New allocates a new, undefined template with the given name. -func New(name string) *Template { - return &Template{unsafeTemplate: template.New(name).Funcs(common.FuncMap)} -} - -const yamlSpecialCharacters = "{}[]&*#?|-.<>=!%@:\"'`,\r\n" - -func mapOrArray(in interface{}) bool { - return in != nil && (reflect.TypeOf(in).Kind() == reflect.Map || reflect.TypeOf(in).Kind() == reflect.Slice || reflect.TypeOf(in).Kind() == reflect.Array) -} - -func allKeysMatch(base interface{}, a interface{}, b interface{}) bool { - if base == nil { - return a == nil && b == nil - } - - switch reflect.TypeOf(base).Kind() { - case reflect.Ptr: - if reflect.TypeOf(a).Kind() != reflect.Ptr || reflect.TypeOf(b).Kind() != reflect.Ptr { - return false - } - - if !allKeysMatch(reflect.ValueOf(base).Elem().Interface(), reflect.ValueOf(a).Elem().Interface(), reflect.ValueOf(b).Elem().Interface()) { - return true - } - case reflect.Map: - if reflect.TypeOf(a).Kind() != reflect.Map || reflect.TypeOf(b).Kind() != reflect.Map { - return false - } - - if reflect.ValueOf(a).Len() != reflect.ValueOf(base).Len() || reflect.ValueOf(b).Len() != reflect.ValueOf(base).Len() { - return false - } - - basei := reflect.ValueOf(base).MapRange() - for basei.Next() { - av := reflect.ValueOf(a).MapIndex(basei.Key()) - bv := reflect.ValueOf(b).MapIndex(basei.Key()) - if !av.IsValid() || !bv.IsValid() || - !allKeysMatch(basei.Value().Interface(), av.Interface(), bv.Interface()) { - return false - } - } - case reflect.Slice, reflect.Array: - if reflect.TypeOf(a).Kind() != reflect.Slice && reflect.TypeOf(a).Kind() != reflect.Array && - reflect.TypeOf(b).Kind() != reflect.Slice && reflect.TypeOf(b).Kind() != reflect.Array { - return false - } - - if reflect.ValueOf(a).Len() != reflect.ValueOf(base).Len() || reflect.ValueOf(b).Len() != reflect.ValueOf(base).Len() { - return false - } - - for i := 0; i < reflect.ValueOf(base).Len(); i++ { - if !allKeysMatch(reflect.ValueOf(base).Index(i).Interface(), reflect.ValueOf(a).Index(i).Interface(), reflect.ValueOf(b).Index(i).Interface()) { - return false - } - } - case reflect.Struct: - n := reflect.ValueOf(base).NumField() - for i := 0; i < n; i++ { - baseit := reflect.TypeOf(base).Field(i) - ait := reflect.TypeOf(a).Field(i) - bit := reflect.TypeOf(b).Field(i) - - if baseit.Name != ait.Name || baseit.Name != bit.Name { - return false - } - - // Only compare public members (private members cannot be overwritten by text/template) - decodedName, _ := utf8.DecodeRuneInString(baseit.Name) - if unicode.IsUpper(decodedName) { - basei := reflect.ValueOf(base).Field(i) - ai := reflect.ValueOf(a).Field(i) - bi := reflect.ValueOf(b).Field(i) - - if !allKeysMatch(basei.Interface(), ai.Interface(), bi.Interface()) { - return false - } - } - } - case 
reflect.String: - // Baseline type of string was chosen arbitrarily, so just check that there isn't a new a map or slice/array injected, which would change the structure of the YAML - if mapOrArray(a) || mapOrArray(b) { - return false - } - default: - if reflect.TypeOf(a) != reflect.TypeOf(base) || reflect.TypeOf(b) != reflect.TypeOf(base) { - return false - } - } - - return true -} - -func unmarshalYaml(data []byte) ([]interface{}, error) { - r := make([]interface{}, 0) - - decoder := yaml.NewDecoder(bytes.NewReader(data)) - for { - var t interface{} - err := decoder.Decode(&t) - - if err == io.EOF { - break - } else if err == nil { - r = append(r, t) - } else { - return nil, err - } - } - - return r, nil -} - -// Mutation algorithm -func mutateString(s string) string { - // Longest possible output string is 2x the original - out := make([]rune, len(s)*2) - - i := 0 - for _, r := range s { - out[i] = r - i++ - - // Don't repeat quoting-related characters so as to not allow YAML context change in the mutation result - if r != '\\' && r != '\'' && r != '"' { - out[i] = r - i++ - } - } - - return string(out[:i]) -} - -// Execute applies a parsed template to the specified data object, -// and writes the output to wr. -// If an error occurs executing the template or writing its output, -// execution stops, but partial results may already have been written to -// the output writer. -// A template may be executed safely in parallel, although if parallel -// executions share a Writer the output may be interleaved. -// -// If data is a reflect.Value, the template applies to the concrete -// value that the reflect.Value holds, as in fmt.Print. -func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { - if data == nil { - return t.unsafeTemplate.Execute(wr, data) - } - - // An attacker may be able to cause type confusion or nil dereference panic during allKeysMatch - defer func() { - if r := recover(); r != nil { - err = ErrYAMLInjection - } - }() - - // Calculate requested result first - var requestedResult bytes.Buffer - - if err := t.unsafeTemplate.Execute(&requestedResult, data); err != nil { - return err - } - - // Fast path for if there are no YAML special characters in the input strings - if !common.ContainsStringsWithSpecialCharacters(data, yamlSpecialCharacters) { - // Note: We assume the result was valid YAML, and don't check for ErrInvalidYAMLTemplate - requestedResult.WriteTo(wr) - return nil - } - - walked, err := t.unsafeTemplate.Clone() - if err != nil { - return err - } - walked.Tree = walked.Tree.Copy() - - common.WalkApplyFuncToNonDeclaractiveActions(walked, walked.Tree.Root) - - // Get baseline - var baselineResult bytes.Buffer - if err = common.ExecuteWithCallback(walked, common.BaselineString, &baselineResult, data); err != nil { - return err - } - - parsedBaselineResult, err := unmarshalYaml(baselineResult.Bytes()) - if err != nil { - return ErrInvalidYAMLTemplate - } - - // If baseline was valid, request must also be valid YAML for no injection to have occurred - parsedRequestedResult, err := unmarshalYaml(requestedResult.Bytes()) - if err != nil { - return ErrYAMLInjection - } - - // Mutate the input - var mutatedResult bytes.Buffer - if err = common.ExecuteWithCallback(walked, mutateString, &mutatedResult, data); err != nil { - return err - } - - parsedMutatedResult, err := unmarshalYaml(mutatedResult.Bytes()) - if err != nil { - return ErrYAMLInjection - } - - // Compare results - if !allKeysMatch(parsedBaselineResult, parsedRequestedResult, 
parsedMutatedResult) { - return ErrYAMLInjection - } - - requestedResult.WriteTo(wr) - return nil -} - -// Name returns the name of the template. -func (t *Template) Name() string { - return t.unsafeTemplate.Name() -} - -// New allocates a new, undefined template associated with the given one and with the same -// delimiters. The association, which is transitive, allows one template to -// invoke another with a {{template}} action. -// -// Because associated templates share underlying data, template construction -// cannot be done safely in parallel. Once the templates are constructed, they -// can be executed in parallel. -func (t *Template) New(name string) *Template { - return &Template{unsafeTemplate: t.unsafeTemplate.New(name).Funcs(common.FuncMap)} -} - -// Clone returns a duplicate of the template, including all associated -// templates. The actual representation is not copied, but the name space of -// associated templates is, so further calls to Parse in the copy will add -// templates to the copy but not to the original. Clone can be used to prepare -// common templates and use them with variant definitions for other templates -// by adding the variants after the clone is made. -func (t *Template) Clone() (*Template, error) { - nt, err := t.unsafeTemplate.Clone() - return &Template{unsafeTemplate: nt}, err -} - -// AddParseTree associates the argument parse tree with the template t, giving -// it the specified name. If the template has not been defined, this tree becomes -// its definition. If it has been defined and already has that name, the existing -// definition is replaced; otherwise a new template is created, defined, and returned. -func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { - nt, err := t.unsafeTemplate.AddParseTree(name, tree) - - if nt != t.unsafeTemplate { - return &Template{unsafeTemplate: nt}, err - } - return t, err -} - -// Option sets options for the template. Options are described by -// strings, either a simple string or "key=value". There can be at -// most one equals sign in an option string. If the option string -// is unrecognized or otherwise invalid, Option panics. -// -// Known options: -// -// missingkey: Control the behavior during execution if a map is -// indexed with a key that is not present in the map. -// -// "missingkey=default" or "missingkey=invalid" -// The default behavior: Do nothing and continue execution. -// If printed, the result of the index operation is the string -// "". -// "missingkey=zero" -// The operation returns the zero value for the map type's element. -// "missingkey=error" -// Execution stops immediately with an error. -func (t *Template) Option(opt ...string) *Template { - for _, s := range opt { - t.unsafeTemplate.Option(s) - } - return t -} - -// Templates returns a slice of defined templates associated with t. -func (t *Template) Templates() []*Template { - s := t.unsafeTemplate.Templates() - - var ns []*Template - for _, nt := range s { - ns = append(ns, &Template{unsafeTemplate: nt}) - } - - return ns -} - -// ExecuteTemplate applies the template associated with t that has the given name -// to the specified data object and writes the output to wr. -// If an error occurs executing the template or writing its output, -// execution stops, but partial results may already have been written to -// the output writer. -// A template may be executed safely in parallel, although if parallel -// executions share a Writer the output may be interleaved. 
-func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { - tmpl := t.Lookup(name) - if tmpl == nil { - return fmt.Errorf("template: no template %q associated with template %q", name, t.Name()) - } - return tmpl.Execute(wr, data) -} - -// Delims sets the action delimiters to the specified strings, to be used in -// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template -// definitions will inherit the settings. An empty delimiter stands for the -// corresponding default: {{ or }}. -// The return value is the template, so calls can be chained. -func (t *Template) Delims(left, right string) *Template { - t.unsafeTemplate.Delims(left, right) - return t -} - -// DefinedTemplates returns a string listing the defined templates, -// prefixed by the string "; defined templates are: ". If there are none, -// it returns the empty string. For generating an error message here -// and in html/template. -func (t *Template) DefinedTemplates() string { - return t.unsafeTemplate.DefinedTemplates() -} - -// Funcs adds the elements of the argument map to the template's function map. -// It must be called before the template is parsed. -// It panics if a value in the map is not a function with appropriate return -// type or if the name cannot be used syntactically as a function in a template. -// It is legal to overwrite elements of the map. The return value is the template, -// so calls can be chained. -func (t *Template) Funcs(funcMap FuncMap) *Template { - t.unsafeTemplate.Funcs(funcMap) - return t -} - -// Lookup returns the template with the given name that is associated with t. -// It returns nil if there is no such template or the template has no definition. -func (t *Template) Lookup(name string) *Template { - nt := t.unsafeTemplate.Lookup(name) - - if nt == nil { - return nil - } - - if nt != t.unsafeTemplate { - return &Template{unsafeTemplate: nt} - } - - return t -} - -// Parse parses text as a template body for t. -// Named template definitions ({{define ...}} or {{block ...}} statements) in text -// define additional templates associated with t and are removed from the -// definition of t itself. -// -// Templates can be redefined in successive calls to Parse. -// A template definition with a body containing only white space and comments -// is considered empty and will not replace an existing template's body. -// This allows using Parse to add new named template definitions without -// overwriting the main template body. -func (t *Template) Parse(text string) (*Template, error) { - nt, err := t.unsafeTemplate.Parse(text) - - if nt != t.unsafeTemplate { - return &Template{unsafeTemplate: nt}, err - } - - return t, err -} - -// Must is a helper that wraps a call to a function returning (*Template, error) -// and panics if the error is non-nil. 
It is intended for use in variable -// initializations such as -// -// var t = template.Must(template.New("name").Parse("text")) -func Must(t *Template, err error) *Template { - if err != nil { - panic(err) - } - return t -} - -func readFileOS(file string) (name string, b []byte, err error) { - name = filepath.Base(file) - b, err = os.ReadFile(file) - return -} - -func readFileFS(fsys fs.FS) func(string) (string, []byte, error) { - return func(file string) (name string, b []byte, err error) { - name = path.Base(file) - b, err = fs.ReadFile(fsys, file) - return - } -} - -func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) { - if len(filenames) == 0 { - // Not really a problem, but be consistent. - return nil, fmt.Errorf("template: no files named in call to ParseFiles") - } - for _, filename := range filenames { - name, b, err := readFile(filename) - if err != nil { - return nil, err - } - s := string(b) - // First template becomes return value if not already defined, - // and we use that one for subsequent New calls to associate - // all the templates together. Also, if this file has the same name - // as t, this file becomes the contents of t, so - // t, err := New(name).Funcs(xxx).ParseFiles(name) - // works. Otherwise we create a new template associated with t. - var tmpl *Template - if t == nil { - t = New(name) - } - if name == t.Name() { - tmpl = t - } else { - tmpl = t.New(name) - } - _, err = tmpl.Parse(s) - if err != nil { - return nil, err - } - } - return t, nil -} - -// parseGlob is the implementation of the function and method ParseGlob. -func parseGlob(t *Template, pattern string) (*Template, error) { - filenames, err := filepath.Glob(pattern) - if err != nil { - return nil, err - } - if len(filenames) == 0 { - return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) - } - return parseFiles(t, readFileOS, filenames...) -} - -func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) { - var filenames []string - for _, pattern := range patterns { - list, err := fs.Glob(fsys, pattern) - if err != nil { - return nil, err - } - if len(list) == 0 { - return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) - } - filenames = append(filenames, list...) - } - return parseFiles(t, readFileFS(fsys), filenames...) -} - -// ParseFiles creates a new Template and parses the template definitions from -// the named files. The returned template's name will have the base name and -// parsed contents of the first file. There must be at least one file. -// If an error occurs, parsing stops and the returned *Template is nil. -// -// When parsing multiple files with the same name in different directories, -// the last one mentioned will be the one that results. -// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template -// named "foo", while "a/foo" is unavailable. -func ParseFiles(filenames ...string) (*Template, error) { - return parseFiles(nil, readFileOS, filenames...) -} - -// ParseFiles parses the named files and associates the resulting templates with -// t. If an error occurs, parsing stops and the returned template is nil; -// otherwise it is t. There must be at least one file. -// Since the templates created by ParseFiles are named by the base -// names of the argument files, t should usually have the name of one -// of the (base) names of the files. If it does not, depending on t's -// contents before calling ParseFiles, t.Execute may fail. 
In that -// case use t.ExecuteTemplate to execute a valid template. -// -// When parsing multiple files with the same name in different directories, -// the last one mentioned will be the one that results. -func (t *Template) ParseFiles(filenames ...string) (*Template, error) { - // Ensure template is inited - t.Option() - - return parseFiles(t, readFileOS, filenames...) -} - -// ParseGlob creates a new Template and parses the template definitions from -// the files identified by the pattern. The files are matched according to the -// semantics of filepath.Match, and the pattern must match at least one file. -// The returned template will have the (base) name and (parsed) contents of the -// first file matched by the pattern. ParseGlob is equivalent to calling -// ParseFiles with the list of files matched by the pattern. -// -// When parsing multiple files with the same name in different directories, -// the last one mentioned will be the one that results. -func ParseGlob(pattern string) (*Template, error) { - return parseGlob(nil, pattern) -} - -// ParseGlob parses the template definitions in the files identified by the -// pattern and associates the resulting templates with t. The files are matched -// according to the semantics of filepath.Match, and the pattern must match at -// least one file. ParseGlob is equivalent to calling t.ParseFiles with the -// list of files matched by the pattern. -// -// When parsing multiple files with the same name in different directories, -// the last one mentioned will be the one that results. -func (t *Template) ParseGlob(pattern string) (*Template, error) { - // Ensure template is inited - t.Option() - - return parseGlob(t, pattern) -} - -// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys -// instead of the host operating system's file system. -// It accepts a list of glob patterns. -// (Note that most file names serve as glob patterns matching only themselves.) -func ParseFS(fsys fs.FS, patterns ...string) (*Template, error) { - return parseFS(nil, fsys, patterns) -} - -// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys -// instead of the host operating system's file system. -// It accepts a list of glob patterns. -// (Note that most file names serve as glob patterns matching only themselves.) -func (t *Template) ParseFS(fsys fs.FS, patterns ...string) (*Template, error) { - // Ensure template is inited - t.Option() - - return parseFS(t, fsys, patterns) -} - -// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b. -func HTMLEscape(w io.Writer, b []byte) { - template.HTMLEscape(w, b) -} - -// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s. -func HTMLEscapeString(s string) string { - return template.HTMLEscapeString(s) -} - -// HTMLEscaper returns the escaped HTML equivalent of the textual -// representation of its arguments. -func HTMLEscaper(args ...interface{}) string { - return template.HTMLEscaper(args) -} - -// IsTrue reports whether the value is 'true', in the sense of not the zero of its type, -// and whether the value has a meaningful truth value. This is the definition of -// truth used by if and other such actions. -func IsTrue(val interface{}) (truth, ok bool) { - return template.IsTrue(val) -} - -// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b. 
-func JSEscape(w io.Writer, b []byte) { - template.JSEscape(w, b) -} - -// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s. -func JSEscapeString(s string) string { - return template.JSEscapeString(s) -} - -// JSEscaper returns the escaped JavaScript equivalent of the textual -// representation of its arguments. -func JSEscaper(args ...interface{}) string { - return template.JSEscaper(args) -} - -// URLQueryEscaper returns the escaped value of the textual representation of -// its arguments in a form suitable for embedding in a URL query. -func URLQueryEscaper(args ...interface{}) string { - return template.URLQueryEscaper(args) -} diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md deleted file mode 100644 index 7ec5ac7ea9..0000000000 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ /dev/null @@ -1,41 +0,0 @@ -# Changelog - -## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) - - -### Features - -* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) - - -### Bug Fixes - -* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) -* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) - -## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) - - -### Features - -* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) - -## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) - - -### Features - -* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) - -### Fixes - -* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) - -## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) - - -### Bug Fixes - -* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) - -## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index a502fdc515..0000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,26 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Tips - -Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). - -Always try to include a test case! If it is not possible or not necessary, -please explain why in the pull request description. 
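For context on the yamltemplate deletion that ends above: the package is a drop-in replacement for text/template that renders a template three times (with the real data, with a fixed baseline string, and with mutated input strings), parses each result as YAML, and returns ErrYAMLInjection when the structures disagree. A hedged sketch of how a caller exercises that behavior; the template text and input values are illustrative, and the import path is written in its canonical github.com form:

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/google/safetext/yamltemplate"
)

func main() {
	t := yamltemplate.Must(yamltemplate.New("cfg").Parse("name: {{.Name}}\n"))

	// A benign value renders exactly as text/template would.
	_ = t.Execute(os.Stdout, map[string]string{"Name": "app"}) // name: app

	// A value that would add a second YAML key changes the document
	// structure relative to the baseline and is rejected.
	err := t.Execute(os.Stdout, map[string]string{"Name": "app\nadmin: true"})
	fmt.Println(errors.Is(err, yamltemplate.ErrYAMLInjection)) // true
}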
- -### Releasing - -Commits that would precipitate a SemVer change, as described in the Conventional -Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) -to create a release candidate pull request. Once submitted, `release-please` -will create a release. - -For tips on how to work with `release-please`, see its documentation. - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f6bc..0000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc68268d9..0000000000 --- a/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md deleted file mode 100644 index 3e9a61889d..0000000000 --- a/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# uuid -The uuid package generates and inspects UUIDs based on -[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). 
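Since the README above summarizes the API, a short sketch of typical use of the package being dropped from vendor. New and Parse live in files of the package that do not appear in this hunk, so treat this as an illustrative summary rather than a quotation:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Random (version 4) UUID.
	id := uuid.New()

	// Parse round-trips the canonical textual form; a UUID is a 16-byte
	// array, so parsed values compare directly with ==.
	parsed, err := uuid.Parse(id.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == id) // true

	// Name-based (version 5) UUID from a well-known namespace, using the
	// NewSHA1 helper deleted later in this diff.
	v5 := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.org"))
	fmt.Println(v5.Version()) // VERSION_5
}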
- -###### Install -```sh -go get github.com/google/uuid -``` - -###### Documentation -[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9d30..0000000000 --- a/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. -func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9af8..0000000000 --- a/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. 
-package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index dc60082d3b..0000000000 --- a/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros - - // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. - Max = UUID{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - } -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) //nolint:errcheck - h.Write(data) //nolint:errcheck - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 14bd34072b..0000000000 --- a/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - return err - } - *uuid = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
-func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b061..0000000000 --- a/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index b2a0bc8711..0000000000 --- a/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This removes the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
-func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcddbd6..0000000000 --- a/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go deleted file mode 100644 index d7fcbf2865..0000000000 --- a/vendor/github.com/google/uuid/null.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2021 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "database/sql/driver" - "encoding/json" - "fmt" -) - -var jsonNull = []byte("null") - -// NullUUID represents a UUID that may be null. -// NullUUID implements the SQL driver.Scanner interface so -// it can be used as a scan destination: -// -// var u uuid.NullUUID -// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) -// ... -// if u.Valid { -// // use u.UUID -// } else { -// // NULL value -// } -// -type NullUUID struct { - UUID UUID - Valid bool // Valid is true if UUID is not NULL -} - -// Scan implements the SQL driver.Scanner interface. -func (nu *NullUUID) Scan(value interface{}) error { - if value == nil { - nu.UUID, nu.Valid = Nil, false - return nil - } - - err := nu.UUID.Scan(value) - if err != nil { - nu.Valid = false - return err - } - - nu.Valid = true - return nil -} - -// Value implements the driver Valuer interface. -func (nu NullUUID) Value() (driver.Value, error) { - if !nu.Valid { - return nil, nil - } - // Delegate to UUID Value function - return nu.UUID.Value() -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (nu NullUUID) MarshalBinary() ([]byte, error) { - if nu.Valid { - return nu.UUID[:], nil - } - - return []byte(nil), nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (nu *NullUUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(nu.UUID[:], data) - nu.Valid = true - return nil -} - -// MarshalText implements encoding.TextMarshaler. -func (nu NullUUID) MarshalText() ([]byte, error) { - if nu.Valid { - return nu.UUID.MarshalText() - } - - return jsonNull, nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. 
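null.go gave NullUUID its JSON, text, binary, and SQL codecs. One way to exercise the JSON null handling without a database; the wrapper struct and payloads here are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

type row struct {
	ID uuid.NullUUID `json:"id"`
}

func main() {
	var r row
	// A JSON null yields Valid == false rather than an error.
	_ = json.Unmarshal([]byte(`{"id": null}`), &r)
	fmt.Println(r.ID.Valid) // false

	_ = json.Unmarshal([]byte(`{"id": "6ba7b810-9dad-11d1-80b4-00c04fd430c8"}`), &r)
	fmt.Println(r.ID.Valid, r.ID.UUID) // true 6ba7b810-...
}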
-func (nu *NullUUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - nu.Valid = false - return err - } - nu.UUID = id - nu.Valid = true - return nil -} - -// MarshalJSON implements json.Marshaler. -func (nu NullUUID) MarshalJSON() ([]byte, error) { - if nu.Valid { - return json.Marshal(nu.UUID) - } - - return jsonNull, nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (nu *NullUUID) UnmarshalJSON(data []byte) error { - if bytes.Equal(data, jsonNull) { - *nu = NullUUID{} - return nil // valid null UUID - } - err := json.Unmarshal(data, &nu.UUID) - nu.Valid = err == nil - return err -} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index 2e02ec06c0..0000000000 --- a/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently. -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go deleted file mode 100644 index c351129279..0000000000 --- a/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. 
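sql.go wired UUID into database/sql through the Scanner and Valuer interfaces. Calling the methods directly shows the contract without a driver (the literal is illustrative):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Scan accepts both string and []byte columns, per the type switch above.
	var id uuid.UUID
	if err := id.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); err != nil {
		panic(err)
	}
	v, _ := id.Value() // never fails; UUIDs map to strings
	fmt.Println(v)
}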
-func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. -func (uuid UUID) Time() Time { - var t Time - switch uuid.Version() { - case 6: - time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 - t = Time(time) - case 7: - time := binary.BigEndian.Uint64(uuid[:8]) - t = Time((time>>16)*10000 + g1582ns100) - default: // forward compatible - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - t = Time(time) - } - return t -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c73780..0000000000 --- a/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. 
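time.go converted between UUID timestamps (100ns ticks since 15 Oct 1582) and Unix time. A sketch of recovering the creation time of a Version 1 UUID via Time and UnixTime:

package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewUUID() // Version 1, embeds the current time
	if err != nil {
		panic(err)
	}
	// Time() yields 100ns ticks since 15 Oct 1582; UnixTime rebases to 1970.
	sec, nsec := id.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC())
}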
-func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. -func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index 5232b48678..0000000000 --- a/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - "sync" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -const randPoolSize = 16 * 16 - -var ( - rander = rand.Reader // random function - poolEnabled = false - poolMu sync.Mutex - poolPos = randPoolSize // protected with poolMu - pool [randPoolSize]byte // protected with poolMu -) - -type invalidLengthError struct{ len int } - -func (err invalidLengthError) Error() string { - return fmt.Sprintf("invalid UUID length: %d", err.len) -} - -// IsInvalidLengthError is matcher function for custom error invalidLengthError -func IsInvalidLengthError(err error) bool { - _, ok := err.(invalidLengthError) - return ok -} - -// Parse decodes s into a UUID or returns an error if it cannot be parsed. 
Both -// the standard UUID forms defined in RFC 4122 -// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, -// Parse accepts non-standard strings such as the raw hex encoding -// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, -// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are -// examined in the latter case. Parse should not be used to validate strings as -// it parses non-standard encodings as indicated above. -func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if !strings.EqualFold(s[:9], "urn:uuid:") { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, invalidLengthError{len(s)} - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34, - } { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, invalidLengthError{len(b)} - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34, - } { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. -func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. 
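Parse deliberately accepts several encodings, which is why its comment warns against using it as a validator. All four spellings below decode to the same value:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	for _, s := range []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",          // standard form
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", // URN form
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",        // Microsoft style
		"6ba7b8109dad11d180b400c04fd430c8",              // raw hex
	} {
		id, err := uuid.Parse(s)
		fmt.Println(id, err)
	}
}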
-func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// Validate returns an error if s is not a properly formatted UUID in one of the following formats: -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx -// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} -// It returns an error if the format is invalid, otherwise nil. -func Validate(s string) error { - switch len(s) { - // Standard UUID format - case 36: - - // UUID with "urn:uuid:" prefix - case 36 + 9: - if !strings.EqualFold(s[:9], "urn:uuid:") { - return fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // UUID enclosed in braces - case 36 + 2: - if s[0] != '{' || s[len(s)-1] != '}' { - return fmt.Errorf("invalid bracketed UUID format") - } - s = s[1 : len(s)-1] - - // UUID without hyphens - case 32: - for i := 0; i < len(s); i += 2 { - _, ok := xtob(s[i], s[i+1]) - if !ok { - return errors.New("invalid UUID format") - } - } - - default: - return invalidLengthError{len(s)} - } - - // Check for standard UUID format - if len(s) == 36 { - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return errors.New("invalid UUID format") - } - for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { - if _, ok := xtob(s[x], s[x+1]); !ok { - return errors.New("invalid UUID format") - } - } - } - - return nil -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} - -// EnableRandPool enables internal randomness pool used for Random -// (Version 4) UUID generation. 
The pool contains random bytes read from -// the random number generator on demand in batches. Enabling the pool -// may improve the UUID generation throughput significantly. -// -// Since the pool is stored on the Go heap, this feature may be a bad fit -// for security sensitive applications. -// -// Both EnableRandPool and DisableRandPool are not thread-safe and should -// only be called when there is no possibility that New or any other -// UUID Version 4 generation function will be called concurrently. -func EnableRandPool() { - poolEnabled = true -} - -// DisableRandPool disables the randomness pool if it was previously -// enabled with EnableRandPool. -// -// Both EnableRandPool and DisableRandPool are not thread-safe and should -// only be called when there is no possibility that New or any other -// UUID Version 4 generation function will be called concurrently. -func DisableRandPool() { - poolEnabled = false - defer poolMu.Unlock() - poolMu.Lock() - poolPos = randPoolSize -} - -// UUIDs is a slice of UUID types. -type UUIDs []UUID - -// Strings returns a string slice containing the string form of each UUID in uuids. -func (uuids UUIDs) Strings() []string { - var uuidStrs = make([]string, len(uuids)) - for i, uuid := range uuids { - uuidStrs[i] = uuid.String() - } - return uuidStrs -} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 463109629e..0000000000 --- a/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - copy(uuid[10:], nodeID[:]) - nodeMu.Unlock() - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index 7697802e4d..0000000000 --- a/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "io" - -// New creates a new random UUID or panics. 
New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewString creates a new random UUID and returns it as a string or panics. -// NewString is equivalent to the expression -// -// uuid.New().String() -func NewString() string { - return Must(NewRandom()).String() -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// Uses the randomness pool if it was enabled with EnableRandPool. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() (UUID, error) { - if !poolEnabled { - return NewRandomFromReader(rander) - } - return newRandomFromPool() -} - -// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. -func NewRandomFromReader(r io.Reader) (UUID, error) { - var uuid UUID - _, err := io.ReadFull(r, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} - -func newRandomFromPool() (UUID, error) { - var uuid UUID - poolMu.Lock() - if poolPos == randPoolSize { - _, err := io.ReadFull(rander, pool[:]) - if err != nil { - poolMu.Unlock() - return Nil, err - } - poolPos = 0 - } - copy(uuid[:], pool[poolPos:(poolPos+16)]) - poolPos += 16 - poolMu.Unlock() - - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go deleted file mode 100644 index 339a959a7a..0000000000 --- a/vendor/github.com/google/uuid/version6.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2023 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "encoding/binary" - -// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. -// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. -// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. -// -// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 -// -// NewV6 returns a Version 6 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewV6 returns Nil and an error. 
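NewRandomFromReader, defined in version4.go above, makes Version 4 generation deterministic by swapping the entropy source, which is mainly useful in tests. A sketch with a seeded math/rand reader (not suitable as production randomness):

package main

import (
	"fmt"
	"math/rand"

	"github.com/google/uuid"
)

func main() {
	// Identical seeds produce identical "random" UUIDs.
	a, _ := uuid.NewRandomFromReader(rand.New(rand.NewSource(1)))
	b, _ := uuid.NewRandomFromReader(rand.New(rand.NewSource(1)))
	fmt.Println(a == b)      // true
	fmt.Println(a.Version()) // VERSION_4
}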
-func NewV6() (UUID, error) { - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - /* - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | time_high | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | time_mid | time_low_and_version | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |clk_seq_hi_res | clk_seq_low | node (0-1) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | node (2-5) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - */ - - binary.BigEndian.PutUint64(uuid[0:], uint64(now)) - binary.BigEndian.PutUint16(uuid[8:], seq) - - uuid[6] = 0x60 | (uuid[6] & 0x0F) - uuid[8] = 0x80 | (uuid[8] & 0x3F) - - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - copy(uuid[10:], nodeID[:]) - nodeMu.Unlock() - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go deleted file mode 100644 index 3167b643d4..0000000000 --- a/vendor/github.com/google/uuid/version7.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2023 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// UUID version 7 features a time-ordered value field derived from the widely -// implemented and well known Unix Epoch timestamp source, -// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. -// As well as improved entropy characteristics over versions 1 or 6. -// -// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 -// -// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. -// -// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). -// Uses the randomness pool if it was enabled with EnableRandPool. -// On error, NewV7 returns Nil and an error -func NewV7() (UUID, error) { - uuid, err := NewRandom() - if err != nil { - return uuid, err - } - makeV7(uuid[:]) - return uuid, nil -} - -// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). -// it use NewRandomFromReader fill random bits. -// On error, NewV7FromReader returns Nil and an error. 
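NewV7 leans on the monotonic getV7Time helper further down, so Version 7 UUIDs minted by one process sort lexically in creation order. A quick check of that property:

package main

import (
	"fmt"
	"sort"

	"github.com/google/uuid"
)

func main() {
	ids := make([]string, 0, 5)
	for i := 0; i < 5; i++ {
		id, err := uuid.NewV7()
		if err != nil {
			panic(err)
		}
		ids = append(ids, id.String())
	}
	// The 48-bit big-endian timestamp plus the 12-bit sequence keeps
	// later UUIDs lexically greater than earlier ones.
	fmt.Println(sort.StringsAreSorted(ids)) // true
}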
-func NewV7FromReader(r io.Reader) (UUID, error) { - uuid, err := NewRandomFromReader(r) - if err != nil { - return uuid, err - } - - makeV7(uuid[:]) - return uuid, nil -} - -// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) -// uuid[8] already has the right version number (Variant is 10) -// see function NewV7 and NewV7FromReader -func makeV7(uuid []byte) { - /* - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | ver | rand_a (12 bit seq) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |var| rand_b | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | rand_b | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - */ - _ = uuid[15] // bounds check - - t, s := getV7Time() - - uuid[0] = byte(t >> 40) - uuid[1] = byte(t >> 32) - uuid[2] = byte(t >> 24) - uuid[3] = byte(t >> 16) - uuid[4] = byte(t >> 8) - uuid[5] = byte(t) - - uuid[6] = 0x70 | (0x0F & byte(s>>8)) - uuid[7] = byte(s) -} - -// lastV7time is the last time we returned stored as: -// -// 52 bits of time in milliseconds since epoch -// 12 bits of (fractional nanoseconds) >> 8 -var lastV7time int64 - -const nanoPerMilli = 1000000 - -// getV7Time returns the time in milliseconds and nanoseconds / 256. -// The returned (milli << 12 + seq) is guarenteed to be greater than -// (milli << 12 + seq) returned by any previous call to getV7Time. -func getV7Time() (milli, seq int64) { - timeMu.Lock() - defer timeMu.Unlock() - - nano := timeNow().UnixNano() - milli = nano / nanoPerMilli - // Sequence number is between 0 and 3906 (nanoPerMilli>>8) - seq = (nano - milli*nanoPerMilli) >> 8 - now := milli<<12 + seq - if now <= lastV7time { - now = lastV7time + 1 - milli = now >> 12 - seq = now & 0xfff - } - lastV7time = now - return milli, seq -} diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore deleted file mode 100644 index 15586a2b54..0000000000 --- a/vendor/github.com/hashicorp/hcl/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -y.output - -# ignore intellij files -.idea -*.iml -*.ipr -*.iws - -*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml deleted file mode 100644 index cb63a32161..0000000000 --- a/vendor/github.com/hashicorp/hcl/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -sudo: false - -language: go - -go: - - 1.x - - tip - -branches: - only: - - master - -script: make test diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE deleted file mode 100644 index c33dcc7c92..0000000000 --- a/vendor/github.com/hashicorp/hcl/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. 
“Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. 
Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile deleted file mode 100644 index 84fd743f5c..0000000000 --- a/vendor/github.com/hashicorp/hcl/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -TEST?=./... - -default: test - -fmt: generate - go fmt ./... - -test: generate - go get -t ./... - go test $(TEST) $(TESTARGS) - -generate: - go generate ./... - -updatedeps: - go get -u golang.org/x/tools/cmd/stringer - -.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md deleted file mode 100644 index c8223326dd..0000000000 --- a/vendor/github.com/hashicorp/hcl/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# HCL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) - -HCL (HashiCorp Configuration Language) is a configuration language built -by HashiCorp. The goal of HCL is to build a structured configuration language -that is both human and machine friendly for use with command-line tools, but -specifically targeted towards DevOps tools, servers, etc. - -HCL is also fully JSON compatible. That is, JSON can be used as completely -valid input to a system expecting HCL. This helps makes systems -interoperable with other systems. - -HCL is heavily inspired by -[libucl](https://github.com/vstakhov/libucl), -nginx configuration, and others similar. - -## Why? - -A common question when viewing HCL is to ask the question: why not -JSON, YAML, etc.? - -Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) -used a variety of configuration languages from full programming languages -such as Ruby to complete data structure languages such as JSON. What we -learned is that some people wanted human-friendly configuration languages -and some people wanted machine-friendly languages. 
- -JSON fits a nice balance in this, but is fairly verbose and most -importantly doesn't support comments. With YAML, we found that beginners -had a really hard time determining what the actual structure was, and -ended up guessing more often than not whether to use a hyphen, colon, etc. -in order to represent some configuration key. - -Full programming languages such as Ruby enable complex behavior -a configuration language shouldn't usually allow, and also forces -people to learn some set of Ruby. - -Because of this, we decided to create our own configuration language -that is JSON-compatible. Our configuration language (HCL) is designed -to be written and modified by humans. The API for HCL allows JSON -as an input so that it is also machine-friendly (machines can generate -JSON instead of trying to generate HCL). - -Our goal with HCL is not to alienate other configuration languages. -It is instead to provide HCL as a specialized language for our tools, -and JSON as the interoperability layer. - -## Syntax - -For a complete grammar, please see the parser itself. A high-level overview -of the syntax and grammar is listed here. - - * Single line comments start with `#` or `//` - - * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments - are not allowed. A multi-line comment (also known as a block comment) - terminates at the first `*/` found. - - * Values are assigned with the syntax `key = value` (whitespace doesn't - matter). The value can be any primitive: a string, number, boolean, - object, or list. - - * Strings are double-quoted and can contain any UTF-8 characters. - Example: `"Hello, World"` - - * Multi-line strings start with `<- - echo %Path% - - go version - - go env - - go get -t ./... - -build_script: -- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go deleted file mode 100644 index d9a00f21d4..0000000000 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ /dev/null @@ -1,769 +0,0 @@ -package hcl - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" - "github.com/hashicorp/hcl/hcl/token" -) - -// This is the tag to use with structures to have settings for HCL -const tagName = "hcl" - -var ( - // nodeType holds a reference to the type of ast.Node - nodeType reflect.Type = findNodeType() -) - -// Unmarshal accepts a byte slice as input and writes the -// data to the value pointed to by v. -func Unmarshal(bs []byte, v interface{}) error { - root, err := parse(bs) - if err != nil { - return err - } - - return DecodeObject(v, root) -} - -// Decode reads the given input and decodes it into the structure -// given by `out`. -func Decode(out interface{}, in string) error { - obj, err := Parse(in) - if err != nil { - return err - } - - return DecodeObject(out, obj) -} - -// DecodeObject is a lower-level version of Decode. It decodes a -// raw Object into the given output. 
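decoder.go is the entry point the README's syntax overview feeds into. A minimal sketch of decoding a config string into a tagged struct, assuming the canonical github.com/hashicorp/hcl import path (the config contents and field names are illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Service struct {
	Name  string `hcl:"name"`
	Ports []int  `hcl:"ports"`
}

func main() {
	const config = `
# single-line comment, per the README's syntax overview
name  = "web"
ports = [80, 443]
`
	var svc Service
	if err := hcl.Decode(&svc, config); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", svc) // {Name:web Ports:[80 443]}
}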
-func DecodeObject(out interface{}, n ast.Node) error { - val := reflect.ValueOf(out) - if val.Kind() != reflect.Ptr { - return errors.New("result must be a pointer") - } - - // If we have the file, we really decode the root node - if f, ok := n.(*ast.File); ok { - n = f.Node - } - - var d decoder - return d.decode("root", n, val.Elem()) -} - -type decoder struct { - stack []reflect.Kind -} - -func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { - k := result - - // If we have an interface with a valid value, we use that - // for the check. - if result.Kind() == reflect.Interface { - elem := result.Elem() - if elem.IsValid() { - k = elem - } - } - - // Push current onto stack unless it is an interface. - if k.Kind() != reflect.Interface { - d.stack = append(d.stack, k.Kind()) - - // Schedule a pop - defer func() { - d.stack = d.stack[:len(d.stack)-1] - }() - } - - switch k.Kind() { - case reflect.Bool: - return d.decodeBool(name, node, result) - case reflect.Float32, reflect.Float64: - return d.decodeFloat(name, node, result) - case reflect.Int, reflect.Int32, reflect.Int64: - return d.decodeInt(name, node, result) - case reflect.Interface: - // When we see an interface, we make our own thing - return d.decodeInterface(name, node, result) - case reflect.Map: - return d.decodeMap(name, node, result) - case reflect.Ptr: - return d.decodePtr(name, node, result) - case reflect.Slice: - return d.decodeSlice(name, node, result) - case reflect.String: - return d.decodeString(name, node, result) - case reflect.Struct: - return d.decodeStruct(name, node, result) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), - } - } -} - -func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.BOOL { - v, err := strconv.ParseBool(n.Token.Text) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { - v, err := strconv.ParseFloat(n.Token.Text, 64) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - v, err := strconv.ParseInt(n.Token.Text, 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - case token.STRING: - v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { - // When we see an ast.Node, we retain 
the value to enable deferred decoding. - // Very useful in situations where we want to preserve ast.Node information - // like Pos - if result.Type() == nodeType && result.CanSet() { - result.Set(reflect.ValueOf(node)) - return nil - } - - var set reflect.Value - redecode := true - - // For testing types, ObjectType should just be treated as a list. We - // set this to a temporary var because we want to pass in the real node. - testNode := node - if ot, ok := node.(*ast.ObjectType); ok { - testNode = ot.List - } - - switch n := testNode.(type) { - case *ast.ObjectList: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) - set = result - } - case *ast.ObjectType: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 1) - set = result - } - case *ast.ListType: - var temp []interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 0) - set = result - case *ast.LiteralType: - switch n.Token.Type { - case token.BOOL: - var result bool - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.FLOAT: - var result float64 - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.NUMBER: - var result int - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.STRING, token.HEREDOC: - set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), - } - } - default: - return fmt.Errorf( - "%s: cannot decode into interface: %T", - name, node) - } - - // Set the result to what its supposed to be, then reset - // result so we don't reflect into this method anymore. - result.Set(set) - - if redecode { - // Revisit the node so that we can use the newly instantiated - // thing and populate it. 
- if err := d.decode(name, node, result); err != nil { - return err - } - } - - return nil -} - -func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { - if item, ok := node.(*ast.ObjectItem); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - n, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), - } - } - - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - resultType := result.Type() - resultElemType := resultType.Elem() - resultKeyType := resultType.Key() - if resultKeyType.Kind() != reflect.String { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Make a map if it is nil - resultMap := result - if result.IsNil() { - resultMap = reflect.MakeMap( - reflect.MapOf(resultKeyType, resultElemType)) - } - - // Go through each element and decode it. - done := make(map[string]struct{}) - for _, item := range n.Items { - if item.Val == nil { - continue - } - - // github.com/hashicorp/terraform/issue/5740 - if len(item.Keys) == 0 { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Get the key we're dealing with, which is the first item - keyStr := item.Keys[0].Token.Value().(string) - - // If we've already processed this key, then ignore it - if _, ok := done[keyStr]; ok { - continue - } - - // Determine the value. If we have more than one key, then we - // get the objectlist of only these keys. - itemVal := item.Val - if len(item.Keys) > 1 { - itemVal = n.Filter(keyStr) - done[keyStr] = struct{}{} - } - - // Make the field name - fieldName := fmt.Sprintf("%s.%s", name, keyStr) - - // Get the key/value as reflection values - key := reflect.ValueOf(keyStr) - val := reflect.Indirect(reflect.New(resultElemType)) - - // If we have a pre-existing value in the map, use that - oldVal := resultMap.MapIndex(key) - if oldVal.IsValid() { - val.Set(oldVal) - } - - // Decode! - if err := d.decode(fieldName, itemVal, val); err != nil { - return err - } - - // Set the value on the map - resultMap.SetMapIndex(key, val) - } - - // Set the final map if we can - set.Set(resultMap) - return nil -} - -func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
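[editor's note] decodeMap above enforces string keys and merges repeated keys through the done set and Filter. A small sketch of decoding an HCL block into a Go map, again assuming the upstream hcl.Decode entry point rather than this vendored copy:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Config struct {
	// Map-typed fields go through decodeMap; HCL requires the keys to be
	// strings, as enforced above.
	Tags map[string]string `hcl:"tags"`
}

func main() {
	src := `
tags {
  env  = "prod"
  team = "infra"
}
`
	var c Config
	if err := hcl.Decode(&c, src); err != nil {
		panic(err)
	}
	fmt.Println(c.Tags["env"], c.Tags["team"]) // prod infra
}
```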
- resultType := result.Type() - resultElemType := resultType.Elem() - val := reflect.New(resultElemType) - if err := d.decode(name, node, reflect.Indirect(val)); err != nil { - return err - } - - result.Set(val) - return nil -} - -func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - // Create the slice if it isn't nil - resultType := result.Type() - resultElemType := resultType.Elem() - if result.IsNil() { - resultSliceType := reflect.SliceOf(resultElemType) - result = reflect.MakeSlice( - resultSliceType, 0, 0) - } - - // Figure out the items we'll be copying into the slice - var items []ast.Node - switch n := node.(type) { - case *ast.ObjectList: - items = make([]ast.Node, len(n.Items)) - for i, item := range n.Items { - items[i] = item - } - case *ast.ObjectType: - items = []ast.Node{n} - case *ast.ListType: - items = n.List - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("unknown slice type: %T", node), - } - } - - for i, item := range items { - fieldName := fmt.Sprintf("%s[%d]", name, i) - - // Decode - val := reflect.Indirect(reflect.New(resultElemType)) - - // if item is an object that was decoded from ambiguous JSON and - // flattened, make sure it's expanded if it needs to decode into a - // defined structure. - item := expandObject(item, val) - - if err := d.decode(fieldName, item, val); err != nil { - return err - } - - // Append it onto the slice - result = reflect.Append(result, val) - } - - set.Set(result) - return nil -} - -// expandObject detects if an ambiguous JSON object was flattened to a List which -// should be decoded into a struct, and expands the ast to properly deocode. -func expandObject(node ast.Node, result reflect.Value) ast.Node { - item, ok := node.(*ast.ObjectItem) - if !ok { - return node - } - - elemType := result.Type() - - // our target type must be a struct - switch elemType.Kind() { - case reflect.Ptr: - switch elemType.Elem().Kind() { - case reflect.Struct: - //OK - default: - return node - } - case reflect.Struct: - //OK - default: - return node - } - - // A list value will have a key and field name. If it had more fields, - // it wouldn't have been flattened. 
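[editor's note] decodeSlice accepts an ObjectList, a single ObjectType, or a ListType as its element source. The common case, an HCL list literal filling a Go slice, looks roughly like this (upstream API assumed):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Config struct {
	// A list literal decodes through decodeSlice, element by element.
	Ports []int `hcl:"ports"`
}

func main() {
	var c Config
	if err := hcl.Decode(&c, `ports = [80, 443, 8080]`); err != nil {
		panic(err)
	}
	fmt.Println(c.Ports) // [80 443 8080]
}
```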
- if len(item.Keys) != 2 { - return node - } - - keyToken := item.Keys[0].Token - item.Keys = item.Keys[1:] - - // we need to un-flatten the ast enough to decode - newNode := &ast.ObjectItem{ - Keys: []*ast.ObjectKey{ - { - Token: keyToken, - }, - }, - Val: &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{item}, - }, - }, - } - - return newNode -} - -func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) - return nil - case token.STRING, token.HEREDOC: - result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type for string %T", name, node), - } -} - -func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { - var item *ast.ObjectItem - if it, ok := node.(*ast.ObjectItem); ok { - item = it - node = it.Val - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - // Handle the special case where the object itself is a literal. Previously - // the yacc parser would always ensure top-level elements were arrays. The new - // parser does not make the same guarantees, thus we need to convert any - // top-level literal elements into a list. - if _, ok := node.(*ast.LiteralType); ok && item != nil { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - list, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), - } - } - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = result - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") - - // Ignore fields with tag name "-" - if tagParts[0] == "-" { - continue - } - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unsupported type to struct: %s", - fieldType.Name, fieldKind), - } - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. 
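[editor's note] The expandObject path matters mostly for JSON input, which the (also removed) package doc says is equally accepted: a JSON object such as {"service": {"web": {...}}} arrives flattened into an item with two keys and must be re-nested before it can fill a struct. A hedged sketch; the ",key" tag and JSON auto-detection are upstream v1 behavior, not verified against this exact revision:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Service struct {
	Name string `hcl:",key"` // receives the block label ("web")
	Port int    `hcl:"port"`
}

type Config struct {
	Services []Service `hcl:"service"`
}

func main() {
	// The same structure in JSON form; expandObject un-flattens the
	// ambiguous two-key item so it can decode into the struct slice.
	src := `{"service": {"web": {"port": 8080}}}`

	var c Config
	if err := hcl.Decode(&c, src); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c.Services) // [{Name:web Port:8080}]
}
```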
- squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append( - structs, result.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields = append(fields, field{fieldType, structVal.Field(i)}) - } - } - - usedKeys := make(map[string]struct{}) - decodedFields := make([]string, 0, len(fields)) - decodedFieldsVal := make([]reflect.Value, 0) - unusedKeysVal := make([]reflect.Value, 0) - - // fill unusedNodeKeys with keys from the AST - // a slice because we have to do equals case fold to match Filter - unusedNodeKeys := make(map[string][]token.Pos, 0) - for i, item := range list.Items { - for _, k := range item.Keys { - // isNestedJSON returns true for e.g. bar in - // { "foo": { "bar": {...} } } - // This isn't an unused node key, so we want to skip it - isNestedJSON := i > 0 && len(item.Keys) > 1 - if !isNestedJSON && (k.Token.JSON || k.Token.Type == token.IDENT) { - fn := k.Token.Value().(string) - sl := unusedNodeKeys[fn] - unusedNodeKeys[fn] = append(sl, k.Token.Pos) - } - } - } - - for _, f := range fields { - field, fieldValue := f.field, f.val - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - fieldName := field.Name - - tagValue := field.Tag.Get(tagName) - tagParts := strings.SplitN(tagValue, ",", 2) - if len(tagParts) >= 2 { - switch tagParts[1] { - case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, fieldValue) - continue - case "key": - if item == nil { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: %s asked for 'key', impossible", - name, fieldName), - } - } - - fieldValue.SetString(item.Keys[0].Token.Value().(string)) - continue - case "unusedKeyPositions": - unusedKeysVal = append(unusedKeysVal, fieldValue) - continue - } - } - - if tagParts[0] != "" { - fieldName = tagParts[0] - } - - // Determine the element we'll use to decode. If it is a single - // match (only object with the field), then we decode it exactly. - // If it is a prefix match, then we decode the matches. - filter := list.Filter(fieldName) - - prefixMatches := filter.Children() - matches := filter.Elem() - if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { - continue - } - - // Track the used keys - usedKeys[fieldName] = struct{}{} - unusedNodeKeys = removeCaseFold(unusedNodeKeys, fieldName) - - // Create the field name and decode. We range over the elements - // because we actually want the value. 
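[editor's note] The tag handling above recognizes a few magic values: ",key" receives the block label, ",decodedFields" a sorted list of the Go field names that were decoded, ",unusedKeyPositions" the leftover keys, and "-" skips a field entirely. A sketch of the first two; the semantics are inferred from the removed code above, not from separate documentation:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Server struct {
	// ",key" is filled from the block label; ",decodedFields" is filled
	// with the names of the sibling fields that actually decoded (here,
	// just "Addr").
	Name          string   `hcl:",key"`
	Addr          string   `hcl:"addr"`
	DecodedFields []string `hcl:",decodedFields"`
}

type Config struct {
	Servers []Server `hcl:"server"`
}

func main() {
	src := `
server "web" {
  addr = "127.0.0.1:80"
}
`
	var c Config
	if err := hcl.Decode(&c, src); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c.Servers[0])
}
```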
- fieldName = fmt.Sprintf("%s.%s", name, fieldName) - if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { - return err - } - } - for _, match := range matches.Items { - var decodeNode ast.Node = match.Val - if ot, ok := decodeNode.(*ast.ObjectType); ok { - decodeNode = &ast.ObjectList{Items: ot.List.Items} - } - - if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { - return err - } - } - - decodedFields = append(decodedFields, field.Name) - } - - if len(decodedFieldsVal) > 0 { - // Sort it so that it is deterministic - sort.Strings(decodedFields) - - for _, v := range decodedFieldsVal { - v.Set(reflect.ValueOf(decodedFields)) - } - } - - if len(unusedNodeKeys) > 0 { - // like decodedFields, populated the unusedKeys field(s) - for _, v := range unusedKeysVal { - v.Set(reflect.ValueOf(unusedNodeKeys)) - } - } - - return nil -} - -// findNodeType returns the type of ast.Node -func findNodeType() reflect.Type { - var nodeContainer struct { - Node ast.Node - } - value := reflect.ValueOf(nodeContainer).FieldByName("Node") - return value.Type() -} - -func removeCaseFold(xs map[string][]token.Pos, y string) map[string][]token.Pos { - var toDel []string - - for i := range xs { - if strings.EqualFold(i, y) { - toDel = append(toDel, i) - } - } - for _, i := range toDel { - delete(xs, i) - } - return xs -} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go deleted file mode 100644 index 575a20b50b..0000000000 --- a/vendor/github.com/hashicorp/hcl/hcl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hcl decodes HCL into usable Go structures. -// -// hcl input can come in either pure HCL format or JSON format. -// It can be parsed into an AST, and then decoded into a structure, -// or it can be decoded directly from a string into a structure. -// -// If you choose to parse HCL into a raw AST, the benefit is that you -// can write custom visitor implementations to implement custom -// semantic checks. By default, HCL does not perform any semantic -// checks. -package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go deleted file mode 100644 index f5b6b93b94..0000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go +++ /dev/null @@ -1,226 +0,0 @@ -// Package ast declares the types used to represent syntax trees for HCL -// (HashiCorp Configuration Language) -package ast - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/token" -) - -// Node is an element in the abstract syntax tree. -type Node interface { - node() - Pos() token.Pos -} - -func (File) node() {} -func (ObjectList) node() {} -func (ObjectKey) node() {} -func (ObjectItem) node() {} -func (Comment) node() {} -func (CommentGroup) node() {} -func (ObjectType) node() {} -func (LiteralType) node() {} -func (ListType) node() {} - -var unknownPos token.Pos - -// File represents a single HCL file -type File struct { - Node Node // usually a *ObjectList - Comments []*CommentGroup // list of all comments in the source -} - -func (f *File) Pos() token.Pos { - return f.Node.Pos() -} - -// ObjectList represents a list of ObjectItems. An HCL file itself is an -// ObjectList. -type ObjectList struct { - Items []*ObjectItem -} - -func (o *ObjectList) Add(item *ObjectItem) { - o.Items = append(o.Items, item) -} - -// Filter filters out the objects with the given key list as a prefix. 
-// -// The returned list of objects contain ObjectItems where the keys have -// this prefix already stripped off. This might result in objects with -// zero-length key lists if they have no children. -// -// If no matches are found, an empty ObjectList (non-nil) is returned. -func (o *ObjectList) Filter(keys ...string) *ObjectList { - var result ObjectList - for _, item := range o.Items { - // If there aren't enough keys, then ignore this - if len(item.Keys) < len(keys) { - continue - } - - match := true - for i, key := range item.Keys[:len(keys)] { - key := key.Token.Value().(string) - if key != keys[i] && !strings.EqualFold(key, keys[i]) { - match = false - break - } - } - if !match { - continue - } - - // Strip off the prefix from the children - newItem := *item - newItem.Keys = newItem.Keys[len(keys):] - result.Add(&newItem) - } - - return &result -} - -// Children returns further nested objects (key length > 0) within this -// ObjectList. This should be used with Filter to get at child items. -func (o *ObjectList) Children() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) > 0 { - result.Add(item) - } - } - - return &result -} - -// Elem returns items in the list that are direct element assignments -// (key length == 0). This should be used with Filter to get at elements. -func (o *ObjectList) Elem() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) == 0 { - result.Add(item) - } - } - - return &result -} - -func (o *ObjectList) Pos() token.Pos { - // If an Object has no members, it won't have a first item - // to use as position - if len(o.Items) == 0 { - return unknownPos - } - // Return the uninitialized position - return o.Items[0].Pos() -} - -// ObjectItem represents a HCL Object Item. An item is represented with a key -// (or keys). It can be an assignment or an object (both normal and nested) -type ObjectItem struct { - // keys is only one length long if it's of type assignment. If it's a - // nested object it can be larger than one. In that case "assign" is - // invalid as there is no assignments for a nested object. - Keys []*ObjectKey - - // assign contains the position of "=", if any - Assign token.Pos - - // val is the item itself. It can be an object,list, number, bool or a - // string. If key length is larger than one, val can be only of type - // Object. - Val Node - - LeadComment *CommentGroup // associated lead comment - LineComment *CommentGroup // associated line comment -} - -func (o *ObjectItem) Pos() token.Pos { - // If a parsed object has no keys, there is no position - // for its first element. - if len(o.Keys) == 0 { - return unknownPos - } - - return o.Keys[0].Pos() -} - -// ObjectKeys are either an identifier or of type string. -type ObjectKey struct { - Token token.Token -} - -func (o *ObjectKey) Pos() token.Pos { - return o.Token.Pos -} - -// LiteralType represents a literal of basic type. 
Valid types are: -// token.NUMBER, token.FLOAT, token.BOOL and token.STRING -type LiteralType struct { - Token token.Token - - // comment types, only used when in a list - LeadComment *CommentGroup - LineComment *CommentGroup -} - -func (l *LiteralType) Pos() token.Pos { - return l.Token.Pos -} - -// ListType represents a HCL List type -type ListType struct { - Lbrack token.Pos // position of "[" - Rbrack token.Pos // position of "]" - List []Node // the elements in lexical order -} - -func (l *ListType) Pos() token.Pos { - return l.Lbrack -} - -func (l *ListType) Add(node Node) { - l.List = append(l.List, node) -} - -// ObjectType represents a HCL Object Type -type ObjectType struct { - Lbrace token.Pos // position of "{" - Rbrace token.Pos // position of "}" - List *ObjectList // the nodes in lexical order -} - -func (o *ObjectType) Pos() token.Pos { - return o.Lbrace -} - -// Comment node represents a single //, # style or /* */ style comment -type Comment struct { - Start token.Pos // position of / or # - Text string -} - -func (c *Comment) Pos() token.Pos { - return c.Start -} - -// CommentGroup node represents a sequence of comments with no other tokens and -// no empty lines between. -type CommentGroup struct { - List []*Comment // len(List) > 0 -} - -func (c *CommentGroup) Pos() token.Pos { - return c.List[0].Pos() -} - -//------------------------------------------------------------------- -// GoStringer -//------------------------------------------------------------------- - -func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } -func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go deleted file mode 100644 index ba07ad42b0..0000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go +++ /dev/null @@ -1,52 +0,0 @@ -package ast - -import "fmt" - -// WalkFunc describes a function to be called for each node during a Walk. The -// returned node can be used to rewrite the AST. Walking stops if the returned -// bool is false. -type WalkFunc func(Node) (Node, bool) - -// Walk traverses an AST in depth-first order: It starts by calling fn(node); -// node must not be nil. If fn returns true, Walk invokes fn recursively for -// each of the non-nil children of node, followed by a call of fn(nil). The -// returned node of fn can be used to rewrite the passed node to fn. -func Walk(node Node, fn WalkFunc) Node { - rewritten, ok := fn(node) - if !ok { - return rewritten - } - - switch n := node.(type) { - case *File: - n.Node = Walk(n.Node, fn) - case *ObjectList: - for i, item := range n.Items { - n.Items[i] = Walk(item, fn).(*ObjectItem) - } - case *ObjectKey: - // nothing to do - case *ObjectItem: - for i, k := range n.Keys { - n.Keys[i] = Walk(k, fn).(*ObjectKey) - } - - if n.Val != nil { - n.Val = Walk(n.Val, fn) - } - case *LiteralType: - // nothing to do - case *ListType: - for i, l := range n.List { - n.List[i] = Walk(l, fn) - } - case *ObjectType: - n.List = Walk(n.List, fn).(*ObjectList) - default: - // should we panic here?
- fmt.Printf("unknown type: %T\n", n) - } - - fn(nil) - return rewritten -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go deleted file mode 100644 index 5c99381dfb..0000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/token" -) - -// PosError is a parse error that contains a position. -type PosError struct { - Pos token.Pos - Err error -} - -func (e *PosError) Error() string { - return fmt.Sprintf("At %s: %s", e.Pos, e.Err) -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go deleted file mode 100644 index 64c83bcfb5..0000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ /dev/null @@ -1,532 +0,0 @@ -// Package parser implements a parser for HCL (HashiCorp Configuration -// Language) -package parser - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/scanner" - "github.com/hashicorp/hcl/hcl/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - comments []*ast.CommentGroup - leadComment *ast.CommentGroup // last lead comment - lineComment *ast.CommentGroup // last line comment - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse parses the given source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - // normalize all line endings - // since the scanner and output only work with "\n" line endings, we may - // end up with dangling "\r" characters in the parsed data. - src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) - - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse parses the buffered source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = &PosError{Pos: pos, Err: errors.New(msg)} - } - - f.Node, err = p.objectList(false) - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - f.Comments = p.comments - return f, nil -} - -// objectList parses a list of items within an object (generally k/v pairs). -// The parameter "obj" tells us whether we are within an object (braces: -// '{', '}') or just at the top level. If we're within an object, we end -// at an RBRACE. -func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - if obj { - tok := p.scan() - p.unscan() - if tok.Type == token.RBRACE { - break - } - } - - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because we might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // object lists can be optionally comma-delimited e.g.
when a list of maps - // is being expressed, so a comma is allowed here - it's simply consumed - tok := p.scan() - if tok.Type != token.COMMA { - p.unscan() - } - } - return node, nil -} - -func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { - endline = p.tok.Pos.Line - - // count the endline if it's multiline comment, ie starting with /* - if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { - // don't use range here - no need to decode Unicode code points - for i := 0; i < len(p.tok.Text); i++ { - if p.tok.Text[i] == '\n' { - endline++ - } - } - } - - comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} - p.tok = p.sc.Scan() - return -} - -func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { - var list []*ast.Comment - endline = p.tok.Pos.Line - - for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { - var comment *ast.Comment - comment, endline = p.consumeComment() - list = append(list, comment) - } - - // add comment group to the comments list - comments = &ast.CommentGroup{List: list} - p.comments = append(p.comments, comments) - - return -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if len(keys) > 0 && err == errEofToken { - // We ignore eof token here since it is an error if we didn't - // receive a value (but we did receive a key) for the item. - err = nil - } - if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { - // This is a strange boolean statement, but what it means is: - // We have keys with no value, and we're likely in an object - // (since RBrace ends an object). For this, we set err to nil so - // we continue and get the error below of having the wrong value - // type. - err = nil - - // Reset the token type so we don't think it completed fine. See - // objectType which uses p.tok.Type to check if we're done with - // the object. - p.tok.Type = token.EOF - } - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - if p.leadComment != nil { - o.LeadComment = p.leadComment - p.leadComment = nil - } - - switch p.tok.Type { - case token.ASSIGN: - o.Assign = p.tok.Pos - o.Val, err = p.object() - if err != nil { - return nil, err - } - case token.LBRACE: - o.Val, err = p.objectType() - if err != nil { - return nil, err - } - default: - keyStr := make([]string, 0, len(keys)) - for _, k := range keys { - keyStr = append(keyStr, k.Token.Text) - } - - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf( - "key '%s' expected start of object ('{') or assignment ('=')", - strings.Join(keyStr, " ")), - } - } - - // key=#comment - // val - if p.lineComment != nil { - o.LineComment, p.lineComment = p.lineComment, nil - } - - // do a look-ahead for line comment - p.scan() - if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { - o.LineComment = p.lineComment - p.lineComment = nil - } - p.unscan() - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - // It is very important to also return the keys here as well as - // the error. This is because we need to be able to tell if we - // did parse keys prior to finding the EOF, or if we just found - // a bare EOF. 
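[editor's note] Putting the removed parser and the earlier ObjectList helpers together: Parse returns an *ast.File whose Node is usually an *ast.ObjectList, and Filter/Children walk it by key prefix, with Filter stripping the matched prefix from each item. A sketch against the upstream import paths this vendored copy came from:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := []byte(`
service "web" { port = 80 }
service "api" { port = 81 }
`)
	f, err := parser.Parse(src)
	if err != nil {
		panic(err)
	}

	// Filter("service") strips the matched key; Children keeps the items
	// that still have keys left, i.e. the labeled blocks.
	list := f.Node.(*ast.ObjectList)
	for _, item := range list.Filter("service").Children().Items {
		fmt.Println(item.Keys[0].Token.Value()) // "web", then "api"
	}
}
```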
- return keys, errEofToken - case token.ASSIGN: - // assignment or object only, but not nested objects. this is not - // allowed: `foo bar = {}` - if keyCount > 1 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), - } - } - - if keyCount == 0 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: errors.New("no object keys found!"), - } - } - - return keys, nil - case token.LBRACE: - var err error - - // If we have no keys, then it is a syntax error. i.e. {{}} is not - // allowed. - if len(keys) == 0 { - err = &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), - } - } - - // object - return keys, err - case token.IDENT, token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{Token: p.tok}) - case token.ILLEGAL: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("illegal character"), - } - default: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), - } - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) object() (ast.Node, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.COMMENT: - // implement comment - case token.EOF: - return nil, errEofToken - } - - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("Unknown token: %+v", tok), - } -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{ - Lbrace: p.tok.Pos, - } - - l, err := p.objectList(true) - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. 
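[editor's note] The keyCount > 1 branch above is easy to trigger: two keys followed by "=" is rejected with a position-carrying *PosError. For instance (upstream paths assumed; the exact message text is not asserted here):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// Two object keys followed by an assignment hit the keyCount > 1
	// branch in objectKey and fail with an "At line:col: ..." error.
	_, err := parser.Parse([]byte(`foo "bar" = 1`))
	fmt.Println(err)
}
```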
- if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - // No error, scan and expect the ending to be a brace - if tok := p.scan(); tok.Type != token.RBRACE { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), - } - } - - o.List = l - o.Rbrace = p.tok.Pos // advanced via parseObjectList - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{ - Lbrack: p.tok.Pos, - } - - needComma := false - for { - tok := p.scan() - if needComma { - switch tok.Type { - case token.COMMA, token.RBRACK: - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error parsing list, expected comma or list end, got: %s", - tok.Type), - } - } - } - switch tok.Type { - case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: - node, err := p.literalType() - if err != nil { - return nil, err - } - - // If there is a lead comment, apply it - if p.leadComment != nil { - node.LeadComment = p.leadComment - p.leadComment = nil - } - - l.Add(node) - needComma = true - case token.COMMA: - // get next list item or we are at the end - // do a look-ahead for line comment - p.scan() - if p.lineComment != nil && len(l.List) > 0 { - lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) - if ok { - lit.LineComment = p.lineComment - l.List[len(l.List)-1] = lit - p.lineComment = nil - } - } - p.unscan() - - needComma = false - continue - case token.LBRACE: - // Looks like a nested object, so parse it out - node, err := p.objectType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse object within list: %s", err), - } - } - l.Add(node) - needComma = true - case token.LBRACK: - node, err := p.listType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse list within list: %s", err), - } - } - l.Add(node) - case token.RBRACK: - // finished - l.Rbrack = p.tok.Pos - return l, nil - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), - } - } - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok, - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. In the process, it collects any -// comment groups encountered, and remembers the last lead and line comments. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - // Otherwise read the next token from the scanner and Save it to the buffer - // in case we unscan later. - prev := p.tok - p.tok = p.sc.Scan() - - if p.tok.Type == token.COMMENT { - var comment *ast.CommentGroup - var endline int - - // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", - // p.tok.Pos.Line, prev.Pos.Line, endline) - if p.tok.Pos.Line == prev.Pos.Line { - // The comment is on same line as the previous token; it - // cannot be a lead comment but may be a line comment. 
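[editor's note] That lead/line classification in scan is what surfaces as LeadComment and LineComment on object items. A sketch showing both attachments (upstream paths assumed):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	f, err := parser.Parse([]byte(`// lead comment
a = 1 // line comment
`))
	if err != nil {
		panic(err)
	}

	// The comment group above the item becomes its LeadComment; the
	// same-line comment after the value becomes its LineComment.
	item := f.Node.(*ast.ObjectList).Items[0]
	fmt.Println(item.LeadComment.List[0].Text) // "// lead comment"
	fmt.Println(item.LineComment.List[0].Text) // "// line comment"
}
```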
- comment, endline = p.consumeCommentGroup(0) - if p.tok.Pos.Line != endline { - // The next token is on a different line, thus - // the last comment group is a line comment. - p.lineComment = comment - } - } - - // consume successor comments, if any - endline = -1 - for p.tok.Type == token.COMMENT { - comment, endline = p.consumeCommentGroup(1) - } - - if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { - switch p.tok.Type { - case token.RBRACE, token.RBRACK: - // Do not count for these cases - default: - // The next token is following on the line immediately after the - // comment group, thus the last comment group is a lead comment. - p.leadComment = comment - } - } - - } - - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go deleted file mode 100644 index 7c038d12a2..0000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go +++ /dev/null @@ -1,789 +0,0 @@ -package printer - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/token" -) - -const ( - blank = byte(' ') - newline = byte('\n') - tab = byte('\t') - infinity = 1 << 30 // offset or line -) - -var ( - unindent = []byte("\uE123") // in the private use space -) - -type printer struct { - cfg Config - prev token.Pos - - comments []*ast.CommentGroup // may be nil, contains all comments - standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node) - - enableTrace bool - indentTrace int -} - -type ByPosition []*ast.CommentGroup - -func (b ByPosition) Len() int { return len(b) } -func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) } - -// collectComments comments all standalone comments which are not lead or line -// comment -func (p *printer) collectComments(node ast.Node) { - // first collect all comments. This is already stored in - // ast.File.(comments) - ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.File: - p.comments = t.Comments - return nn, false - } - return nn, true - }) - - standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0) - for _, c := range p.comments { - standaloneComments[c.Pos()] = c - } - - // next remove all lead and line comments from the overall comment map. - // This will give us comments which are standalone, comments which are not - // assigned to any kind of node. 
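[editor's note] collectComments is a typical use of the ast.Walk helper removed above: visit every node depth-first, act on the interesting types, and return true to keep descending. A minimal standalone sketch of the same pattern:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	f, err := parser.Parse([]byte(`a = 1
b = "two"`))
	if err != nil {
		panic(err)
	}

	// Walk visits every node; returning true continues the traversal,
	// and the returned node may rewrite the tree in place.
	ast.Walk(f.Node, func(n ast.Node) (ast.Node, bool) {
		if lit, ok := n.(*ast.LiteralType); ok {
			fmt.Println(lit.Token.Text) // 1, then "two"
		}
		return n, true
	})
}
```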
- ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.LiteralType: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - case *ast.ObjectItem: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - } - - return nn, true - }) - - for _, c := range standaloneComments { - p.standaloneComments = append(p.standaloneComments, c) - } - - sort.Sort(ByPosition(p.standaloneComments)) -} - -// output prints creates b printable HCL output and returns it. -func (p *printer) output(n interface{}) []byte { - var buf bytes.Buffer - - switch t := n.(type) { - case *ast.File: - // File doesn't trace so we add the tracing here - defer un(trace(p, "File")) - return p.output(t.Node) - case *ast.ObjectList: - defer un(trace(p, "ObjectList")) - - var index int - for { - // Determine the location of the next actual non-comment - // item. If we're at the end, the next item is at "infinity" - var nextItem token.Pos - if index != len(t.Items) { - nextItem = t.Items[index].Pos() - } else { - nextItem = token.Pos{Offset: infinity, Line: infinity} - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - // Go through all the comments in the group. The group - // should be printed together, not separated by double newlines. - printed := false - newlinePrinted := false - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). - // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // if we hit the end add newlines so we can print the comment - // we don't do this if prev is invalid which means the - // beginning of the file since the first comment should - // be at the first line. - if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) { - buf.Write([]byte{newline, newline}) - newlinePrinted = true - } - - // Write the actual comment. - buf.WriteString(comment.Text) - buf.WriteByte(newline) - - // Set printed to true to note that we printed something - printed = true - } - } - - // If we're not at the last item, write a new line so - // that there is a newline separating this comment from - // the next object. - if printed && index != len(t.Items) { - buf.WriteByte(newline) - } - } - - if index == len(t.Items) { - break - } - - buf.Write(p.output(t.Items[index])) - if index != len(t.Items)-1 { - // Always write a newline to separate us from the next item - buf.WriteByte(newline) - - // Need to determine if we're going to separate the next item - // with a blank line. The logic here is simple, though there - // are a few conditions: - // - // 1. The next object is more than one line away anyways, - // so we need an empty line. - // - // 2. 
The next object is not a "single line" object, so - // we need an empty line. - // - // 3. This current object is not a single line object, - // so we need an empty line. - current := t.Items[index] - next := t.Items[index+1] - if next.Pos().Line != t.Items[index].Pos().Line+1 || - !p.isSingleLineObject(next) || - !p.isSingleLineObject(current) { - buf.WriteByte(newline) - } - } - index++ - } - case *ast.ObjectKey: - buf.WriteString(t.Token.Text) - case *ast.ObjectItem: - p.prev = t.Pos() - buf.Write(p.objectItem(t)) - case *ast.LiteralType: - buf.Write(p.literalType(t)) - case *ast.ListType: - buf.Write(p.list(t)) - case *ast.ObjectType: - buf.Write(p.objectType(t)) - default: - fmt.Printf(" unknown type: %T\n", n) - } - - return buf.Bytes() -} - -func (p *printer) literalType(lit *ast.LiteralType) []byte { - result := []byte(lit.Token.Text) - switch lit.Token.Type { - case token.HEREDOC: - // Clear the trailing newline from heredocs - if result[len(result)-1] == '\n' { - result = result[:len(result)-1] - } - - // Poison lines 2+ so that we don't indent them - result = p.heredocIndent(result) - case token.STRING: - // If this is a multiline string, poison lines 2+ so we don't - // indent them. - if bytes.IndexRune(result, '\n') >= 0 { - result = p.heredocIndent(result) - } - } - - return result -} - -// objectItem returns the printable HCL form of an object item. An object type -// starts with one/multiple keys and has a value. The value might be of any -// type. -func (p *printer) objectItem(o *ast.ObjectItem) []byte { - defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) - var buf bytes.Buffer - - if o.LeadComment != nil { - for _, comment := range o.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - // If key and val are on different lines, treat line comments like lead comments. - if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line { - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range o.Keys { - buf.WriteString(k.Token.Text) - buf.WriteByte(blank) - - // reach end of key - if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - buf.Write(p.output(o.Val)) - - if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line { - buf.WriteByte(blank) - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - } - } - - return buf.Bytes() -} - -// objectType returns the printable HCL form of an object type. An object type -// begins with a brace and ends with a brace. -func (p *printer) objectType(o *ast.ObjectType) []byte { - defer un(trace(p, "ObjectType")) - var buf bytes.Buffer - buf.WriteString("{") - - var index int - var nextItem token.Pos - var commented, newlinePrinted bool - for { - // Determine the location of the next actual non-comment - // item. If we're at the end, the next item is the closing brace - if index != len(o.List.Items) { - nextItem = o.List.Items[index].Pos() - } else { - nextItem = o.Rbrace - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - printed := false - var lastCommentPos token.Pos - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). 
- // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // If there are standalone comments and the initial newline has not - // been printed yet, do it now. - if !newlinePrinted { - newlinePrinted = true - buf.WriteByte(newline) - } - - // add newline if it's between other printed nodes - if index > 0 { - commented = true - buf.WriteByte(newline) - } - - // Store this position - lastCommentPos = comment.Pos() - - // output the comment itself - buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) - - // Set printed to true to note that we printed something - printed = true - - /* - if index != len(o.List.Items) { - buf.WriteByte(newline) // do not print on the end - } - */ - } - } - - // Stuff to do if we had comments - if printed { - // Always write a newline - buf.WriteByte(newline) - - // If there is another item in the object and our comment - // didn't hug it directly, then make sure there is a blank - // line separating them. - if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { - buf.WriteByte(newline) - } - } - } - - if index == len(o.List.Items) { - p.prev = o.Rbrace - break - } - - // At this point we are sure that it's not a totally empty block: print - // the initial newline if it hasn't been printed yet by the previous - // block about standalone comments. - if !newlinePrinted { - buf.WriteByte(newline) - newlinePrinted = true - } - - // check if we have adjacent one liner items. If yes we'll going to align - // the comments. - var aligned []*ast.ObjectItem - for _, item := range o.List.Items[index:] { - // we don't group one line lists - if len(o.List.Items) == 1 { - break - } - - // one means a oneliner with out any lead comment - // two means a oneliner with lead comment - // anything else might be something else - cur := lines(string(p.objectItem(item))) - if cur > 2 { - break - } - - curPos := item.Pos() - - nextPos := token.Pos{} - if index != len(o.List.Items)-1 { - nextPos = o.List.Items[index+1].Pos() - } - - prevPos := token.Pos{} - if index != 0 { - prevPos = o.List.Items[index-1].Pos() - } - - // fmt.Println("DEBUG ----------------") - // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) - // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) - // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) - - if curPos.Line+1 == nextPos.Line { - aligned = append(aligned, item) - index++ - continue - } - - if curPos.Line-1 == prevPos.Line { - aligned = append(aligned, item) - index++ - - // finish if we have a new line or comment next. This happens - // if the next item is not adjacent - if curPos.Line+1 != nextPos.Line { - break - } - continue - } - - break - } - - // put newlines if the items are between other non aligned items. 
- // newlines are also added if there is a standalone comment already, so - // check it too - if !commented && index != len(aligned) { - buf.WriteByte(newline) - } - - if len(aligned) >= 1 { - p.prev = aligned[len(aligned)-1].Pos() - - items := p.alignedItems(aligned) - buf.Write(p.indent(items)) - } else { - p.prev = o.List.Items[index].Pos() - - buf.Write(p.indent(p.objectItem(o.List.Items[index]))) - index++ - } - - buf.WriteByte(newline) - } - - buf.WriteString("}") - return buf.Bytes() -} - -func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { - var buf bytes.Buffer - - // find the longest key and value length, needed for alignment - var longestKeyLen int // longest key length - var longestValLen int // longest value length - for _, item := range items { - key := len(item.Keys[0].Token.Text) - val := len(p.output(item.Val)) - - if key > longestKeyLen { - longestKeyLen = key - } - - if val > longestValLen { - longestValLen = val - } - } - - for i, item := range items { - if item.LeadComment != nil { - for _, comment := range item.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range item.Keys { - keyLen := len(k.Token.Text) - buf.WriteString(k.Token.Text) - for i := 0; i < longestKeyLen-keyLen+1; i++ { - buf.WriteByte(blank) - } - - // reach end of key - if i == len(item.Keys)-1 && len(item.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - val := p.output(item.Val) - valLen := len(val) - buf.Write(val) - - if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { - for i := 0; i < longestValLen-valLen+1; i++ { - buf.WriteByte(blank) - } - - for _, comment := range item.LineComment.List { - buf.WriteString(comment.Text) - } - } - - // do not print for the last item - if i != len(items)-1 { - buf.WriteByte(newline) - } - } - - return buf.Bytes() -} - -// list returns the printable HCL form of an list type. -func (p *printer) list(l *ast.ListType) []byte { - if p.isSingleLineList(l) { - return p.singleLineList(l) - } - - var buf bytes.Buffer - buf.WriteString("[") - buf.WriteByte(newline) - - var longestLine int - for _, item := range l.List { - // for now we assume that the list only contains literal types - if lit, ok := item.(*ast.LiteralType); ok { - lineLen := len(lit.Token.Text) - if lineLen > longestLine { - longestLine = lineLen - } - } - } - - haveEmptyLine := false - for i, item := range l.List { - // If we have a lead comment, then we want to write that first - leadComment := false - if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { - leadComment = true - - // Ensure an empty line before every element with a - // lead comment (except the first item in a list). - if !haveEmptyLine && i != 0 { - buf.WriteByte(newline) - } - - for _, comment := range lit.LeadComment.List { - buf.Write(p.indent([]byte(comment.Text))) - buf.WriteByte(newline) - } - } - - // also indent each line - val := p.output(item) - curLen := len(val) - buf.Write(p.indent(val)) - - // if this item is a heredoc, then we output the comma on - // the next line. This is the only case this happens. 
- comma := []byte{','} - if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - buf.WriteByte(newline) - comma = p.indent(comma) - } - - buf.Write(comma) - - if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { - // if the next item doesn't have any comments, do not align - buf.WriteByte(blank) // align one space - for i := 0; i < longestLine-curLen; i++ { - buf.WriteByte(blank) - } - - for _, comment := range lit.LineComment.List { - buf.WriteString(comment.Text) - } - } - - buf.WriteByte(newline) - - // Ensure an empty line after every element with a - // lead comment (except the first item in a list). - haveEmptyLine = leadComment && i != len(l.List)-1 - if haveEmptyLine { - buf.WriteByte(newline) - } - } - - buf.WriteString("]") - return buf.Bytes() -} - -// isSingleLineList returns true if: -// * they were previously formatted entirely on one line -// * they consist entirely of literals -// * there are either no heredoc strings or the list has exactly one element -// * there are no line comments -func (printer) isSingleLineList(l *ast.ListType) bool { - for _, item := range l.List { - if item.Pos().Line != l.Lbrack.Line { - return false - } - - lit, ok := item.(*ast.LiteralType) - if !ok { - return false - } - - if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { - return false - } - - if lit.LineComment != nil { - return false - } - } - - return true -} - -// singleLineList prints a simple single line list. -// For a definition of "simple", see isSingleLineList above. -func (p *printer) singleLineList(l *ast.ListType) []byte { - buf := &bytes.Buffer{} - - buf.WriteString("[") - for i, item := range l.List { - if i != 0 { - buf.WriteString(", ") - } - - // Output the item itself - buf.Write(p.output(item)) - - // The heredoc marker needs to be at the end of line. - if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - buf.WriteByte(newline) - } - } - - buf.WriteString("]") - return buf.Bytes() -} - -// indent indents the lines of the given buffer for each non-empty line -func (p *printer) indent(buf []byte) []byte { - var prefix []byte - if p.cfg.SpacesWidth != 0 { - for i := 0; i < p.cfg.SpacesWidth; i++ { - prefix = append(prefix, blank) - } - } else { - prefix = []byte{tab} - } - - var res []byte - bol := true - for _, c := range buf { - if bol && c != '\n' { - res = append(res, prefix...) - } - - res = append(res, c) - bol = c == '\n' - } - return res -} - -// unindent removes all the indentation from the tombstoned lines -func (p *printer) unindent(buf []byte) []byte { - var res []byte - for i := 0; i < len(buf); i++ { - skip := len(buf)-i <= len(unindent) - if !skip { - skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) - } - if skip { - res = append(res, buf[i]) - continue - } - - // We have a marker. we have to backtrace here and clean out - // any whitespace ahead of our tombstone up to a \n - for j := len(res) - 1; j >= 0; j-- { - if res[j] == '\n' { - break - } - - res = res[:j] - } - - // Skip the entire unindent marker - i += len(unindent) - 1 - } - - return res -} - -// heredocIndent marks all the 2nd and further lines as unindentable -func (p *printer) heredocIndent(buf []byte) []byte { - var res []byte - bol := false - for _, c := range buf { - if bol && c != '\n' { - res = append(res, unindent...) - } - res = append(res, c) - bol = c == '\n' - } - return res -} - -// isSingleLineObject tells whether the given object item is a single -// line object such as "obj {}". 
-// -// A single line object: -// -// * has no lead comments (hence multi-line) -// * has no assignment -// * has no values in the stanza (within {}) -// -func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { - // If there is a lead comment, can't be one line - if val.LeadComment != nil { - return false - } - - // If there is assignment, we always break by line - if val.Assign.IsValid() { - return false - } - - // If it isn't an object type, then its not a single line object - ot, ok := val.Val.(*ast.ObjectType) - if !ok { - return false - } - - // If the object has no items, it is single line! - return len(ot.List.Items) == 0 -} - -func lines(txt string) int { - endline := 1 - for i := 0; i < len(txt); i++ { - if txt[i] == '\n' { - endline++ - } - } - return endline -} - -// ---------------------------------------------------------------------------- -// Tracing support - -func (p *printer) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - i := 2 * p.indentTrace - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *printer, msg string) *printer { - p.printTrace(msg, "(") - p.indentTrace++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *printer) { - p.indentTrace-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go deleted file mode 100644 index 6617ab8e7a..0000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package printer implements printing of AST nodes to HCL format. -package printer - -import ( - "bytes" - "io" - "text/tabwriter" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" -) - -var DefaultConfig = Config{ - SpacesWidth: 2, -} - -// A Config node controls the output of Fprint. -type Config struct { - SpacesWidth int // if set, it will use spaces instead of tabs for alignment -} - -func (c *Config) Fprint(output io.Writer, node ast.Node) error { - p := &printer{ - cfg: *c, - comments: make([]*ast.CommentGroup, 0), - standaloneComments: make([]*ast.CommentGroup, 0), - // enableTrace: true, - } - - p.collectComments(node) - - if _, err := output.Write(p.unindent(p.output(node))); err != nil { - return err - } - - // flush tabwriter, if any - var err error - if tw, _ := output.(*tabwriter.Writer); tw != nil { - err = tw.Flush() - } - - return err -} - -// Fprint "pretty-prints" an HCL node to output -// It calls Config.Fprint with default settings. -func Fprint(output io.Writer, node ast.Node) error { - return DefaultConfig.Fprint(output, node) -} - -// Format formats src HCL and returns the result. 
-func Format(src []byte) ([]byte, error) { - node, err := parser.Parse(src) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - if err := DefaultConfig.Fprint(&buf, node); err != nil { - return nil, err - } - - // Add trailing newline to result - buf.WriteString("\n") - return buf.Bytes(), nil -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go deleted file mode 100644 index 624a18fe3a..0000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ /dev/null @@ -1,652 +0,0 @@ -// Package scanner implements a scanner for HCL (HashiCorp Configuration -// Language) source text. -package scanner - -import ( - "bytes" - "fmt" - "os" - "regexp" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/hcl/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). 
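[editor's note] The printer's public surface is small: Fprint writes a node, and Format is the parse-then-print convenience used by hclfmt-style tooling, appending a trailing newline as shown in the removed body above. A sketch (upstream paths assumed):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	// Format parses the source and pretty-prints it with the two-space
	// DefaultConfig, aligning keys and normalizing whitespace.
	out, err := printer.Format([]byte(`foo="bar"
baz = [1,2]`))
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```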
-func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == utf8.RuneError && size == 1 { - s.err("illegal UTF-8 encoding") - return ch - } - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - if ch == '\x00' { - s.err("unexpected null character (0x00)") - return eof - } - - if ch == '\uE123' { - s.err("unicode code point U+E123 reserved for internal use") - return utf8.RuneError - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - tok = token.IDENT - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '#', '/': - tok = token.COMMENT - s.scanComment(ch) - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '<': - tok = token.HEREDOC - s.scanHeredoc() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case '=': - tok = token.ASSIGN - case '+': - tok = token.ADD - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - tok = token.SUB - } - default: - s.err("illegal char") - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -func (s *Scanner) scanComment(ch rune) { - // single line comments - if ch == '#' || (ch == '/' && s.peek() != '*') { - if ch == '/' && s.peek() != '/' 
{ - s.err("expected '/' for comment") - return - } - - ch = s.next() - for ch != '\n' && ch >= 0 && ch != eof { - ch = s.next() - } - if ch != eof && ch >= 0 { - s.unread() - } - return - } - - // be sure we get the character after /* This allows us to find comment's - // that are not erminated - if ch == '/' { - s.next() - ch = s.next() // read character after "/*" - } - - // look for /* - style comments - for { - if ch < 0 || ch == eof { - s.err("comment not terminated") - break - } - - ch0 := ch - ch = s.next() - if ch0 == '*' && ch == '/' { - break - } - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - if ch == '0' { - // check for hexadecimal, octal or float - ch = s.next() - if ch == 'x' || ch == 'X' { - // hexadecimal - ch = s.next() - found := false - for isHexadecimal(ch) { - ch = s.next() - found = true - } - - if !found { - s.err("illegal hexadecimal number") - } - - if ch != eof { - s.unread() - } - - return token.NUMBER - } - - // now it's either something like: 0421(octal) or 0.1231(float) - illegalOctal := false - for isDecimal(ch) { - ch = s.next() - if ch == '8' || ch == '9' { - // this is just a possibility. For example 0159 is illegal, but - // 0159.23 is valid. So we mark a possible illegal octal. If - // the next character is not a period, we'll print the error. - illegalOctal = true - } - } - - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if illegalOctal { - s.err("illegal octal number") - } - - if ch != eof { - s.unread() - } - return token.NUMBER - } - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - return token.NUMBER -} - -// scanMantissa scans the mantissa beginning from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanHeredoc scans a heredoc string -func (s *Scanner) scanHeredoc() { - // Scan the second '<' in example: '<<EOF' - if s.next() != '<' { - s.err("heredoc expected second '<', didn't see it") - return - } - - // Get the original offset so we can read just the heredoc ident - offs := s.srcPos.Offset - - // Scan the identifier - ch := s.next() - - // Indented heredoc syntax - if ch == '-' { - ch = s.next() - } - - for isLetter(ch) || isDigit(ch) { - ch = s.next() - } - - // If we reached an EOF then that is not good - if ch == eof { - s.err("heredoc not terminated") - return - } - - // Ignore the '\r' in Windows line endings - if ch == '\r' { - if s.peek() == '\n' { - ch = s.next() - } - } - - // If we didn't reach a newline then that is also not good - if ch != '\n' { - s.err("invalid characters in heredoc anchor") - return - } - - // Read the identifier - identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen] - if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') { - s.err("zero-length heredoc anchor") - return - } - - var identRegexp *regexp.Regexp - if identBytes[0] == '-' { - identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:])) - } else { - identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes)) - } - - // Read the actual string value - lineStart := s.srcPos.Offset - for { - ch := s.next() - - // CRLF is newline on Windows - if ch == '\n' { - // Check if the line was the end of the heredoc - lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart - if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { - break - } - - // Not an anchor match, record the start of a new line - lineStart = s.srcPos.Offset - } - - if ch == eof { - s.err("heredoc not terminated") - return - } - } - - return -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' && braces == 0 { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - start := n - for n > 0 && digitVal(ch) < base { - ch = s.next() - if ch == eof { - // If we see an EOF, we halt any more scanning of digits - // immediately. - break - } - - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - if n != start && ch != eof { - // we scanned all digits, put the last non digit char back, - // only if we read anything at all - s.unread() - } - - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan.
-func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isDigit returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isDecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go deleted file mode 100644 index 5f981eaa2f..0000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ /dev/null @@ -1,241 +0,0 @@ -package strconv - -import ( - "errors" - "unicode/utf8" -) - -// ErrSyntax indicates that a value does not have the right syntax for the target type. -var ErrSyntax = errors.New("invalid syntax") - -// Unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -func Unquote(s string) (t string, err error) { - n := len(s) - if n < 2 { - return "", ErrSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", ErrSyntax - } - s = s[1 : n-1] - - if quote != '"' { - return "", ErrSyntax - } - if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { - return "", ErrSyntax - } - - // Is it trivial? Avoid allocation. 
- if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { - switch quote { - case '"': - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - // If we're starting a '${}' then let it through un-unquoted. - // Specifically: we don't unquote any characters within the `${}` - // section. - if s[0] == '$' && len(s) > 1 && s[1] == '{' { - buf = append(buf, '$', '{') - s = s[2:] - - // Continue reading until we find the closing brace, copying as-is - braces := 1 - for len(s) > 0 && braces > 0 { - r, size := utf8.DecodeRuneInString(s) - if r == utf8.RuneError { - return "", ErrSyntax - } - - s = s[size:] - - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - - switch r { - case '{': - braces++ - case '}': - braces-- - } - } - if braces != 0 { - return "", ErrSyntax - } - if len(s) == 0 { - // If there's no string left, we're done! - break - } else { - // If there's more left, we need to pop back up to the top of the loop - // in case there's another interpolation in this string. - continue - } - } - - if s[0] == '\n' { - return "", ErrSyntax - } - - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", ErrSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} - -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"'): - err = ErrSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = ErrSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = ErrSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = ErrSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = ErrSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = ErrSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = ErrSyntax - return 
- } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = ErrSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"': - if c != quote { - err = ErrSyntax - return - } - value = rune(c) - default: - err = ErrSyntax - return - } - tail = s - return -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go deleted file mode 100644 index 59c1bb72d4..0000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. -func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go deleted file mode 100644 index e37c0664ec..0000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/token.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package token defines constants representing the lexical tokens for HCL -// (HashiCorp Configuration Language) -package token - -import ( - "fmt" - "strconv" - "strings" - - hclstrconv "github.com/hashicorp/hcl/hcl/strconv" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string - JSON bool -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - COMMENT - - identifier_beg - IDENT // literals - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - HEREDOC // < 0 { - // Pop the current item - n := len(frontier) - item := frontier[n-1] - frontier = frontier[:n-1] - - switch v := item.Val.(type) { - case *ast.ObjectType: - items, frontier = flattenObjectType(v, item, items, frontier) - case *ast.ListType: - items, frontier = flattenListType(v, item, items, frontier) - default: - items = append(items, item) - } - } - - // Reverse the list since the frontier model runs things backwards - for i := len(items)/2 - 1; i >= 0; i-- { - opp := len(items) - 1 - i - items[i], items[opp] = items[opp], items[i] - } - - // Done! 
Set the original items - list.Items = items - return n, true - }) -} - -func flattenListType( - ot *ast.ListType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list is empty, keep the original list - if len(ot.List) == 0 { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List { - if _, ok := subitem.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, elem := range ot.List { - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: item.Keys, - Assign: item.Assign, - Val: elem, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} - -func flattenObjectType( - ot *ast.ObjectType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list has no items we do not have to flatten anything - if ot.List.Items == nil { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List.Items { - if _, ok := subitem.Val.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, subitem := range ot.List.Items { - // Copy the new key - keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) - copy(keys, item.Keys) - copy(keys[len(item.Keys):], subitem.Keys) - - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: keys, - Assign: item.Assign, - Val: subitem.Val, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go deleted file mode 100644 index 125a5f0729..0000000000 --- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ /dev/null @@ -1,313 +0,0 @@ -package parser - -import ( - "errors" - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hcltoken "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/hcl/json/scanner" - "github.com/hashicorp/hcl/json/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. 
-func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = fmt.Errorf("%s: %s", pos, msg) - } - - // The root must be an object in JSON - object, err := p.object() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - // We make our final node an object list so it is more HCL compatible - f.Node = object.List - - // Flatten it, which finds patterns and turns them into more HCL-like - // AST trees. - flattenObjects(f.Node) - - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // Check for a followup comma. If it isn't a comma, then we're done - if tok := p.scan(); tok.Type != token.COMMA { - break - } - } - - return node, nil -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - switch p.tok.Type { - case token.COLON: - pos := p.tok.Pos - o.Assign = hcltoken.Pos{ - Filename: pos.Filename, - Offset: pos.Offset, - Line: pos.Line, - Column: pos.Column, - } - - o.Val, err = p.objectValue() - if err != nil { - return nil, err - } - } - - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - return nil, errEofToken - case token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{ - Token: p.tok.HCLToken(), - }) - case token.COLON: - // If we have a zero keycount it means that we never got - // an object key, i.e. `{ :`. This is a syntax error. - if keyCount == 0 { - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - - // Done - return keys, nil - case token.ILLEGAL: - return nil, errors.New("illegal") - default: - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) objectValue() (ast.Node, error) { - defer un(trace(p, "ParseObjectValue")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
-func (p *Parser) object() (*ast.ObjectType, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.LBRACE: - return p.objectType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{} - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - o.List = l - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{} - - for { - tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING: - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.COMMA: - continue - case token.LBRACE: - node, err := p.objectType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - return l, nil - default: - return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) - } - - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok.HCLToken(), - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - p.tok = p.sc.Scan() - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) 
-} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go deleted file mode 100644 index fe3f0f0950..0000000000 --- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go +++ /dev/null @@ -1,451 +0,0 @@ -package scanner - -import ( - "bytes" - "fmt" - "os" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/json/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. 
-func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } else if lit == "null" { - tok = token.NULL - } else { - s.err("illegal char") - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case ':': - tok = token.COLON - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - s.err("illegal char") - } - default: - s.err("illegal char: " + string(ch)) - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - zero := ch == '0' - pos := s.srcPos - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - - // If we have a larger number and this is zero, error - if zero && pos != s.srcPos { - s.err("numbers cannot start with 0") - } - - return token.NUMBER -} - -// scanMantissa scans the mantissa beginning from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if ch == '\n' || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - // we scanned all digits, put the last non digit char back - s.unread() - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. -func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. 
If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isHexadecimal returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isHexadecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go deleted file mode 100644 index 59c1bb72d4..0000000000 --- a/vendor/github.com/hashicorp/hcl/json/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. 
-func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go deleted file mode 100644 index 95a0c3eee6..0000000000 --- a/vendor/github.com/hashicorp/hcl/json/token/token.go +++ /dev/null @@ -1,118 +0,0 @@ -package token - -import ( - "fmt" - "strconv" - - hcltoken "github.com/hashicorp/hcl/hcl/token" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - - identifier_beg - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - NULL // null - literal_end - identifier_end - - operator_beg - LBRACK // [ - LBRACE // { - COMMA // , - PERIOD // . - COLON // : - - RBRACK // ] - RBRACE // } - - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - - NUMBER: "NUMBER", - FLOAT: "FLOAT", - BOOL: "BOOL", - STRING: "STRING", - NULL: "NULL", - - LBRACK: "LBRACK", - LBRACE: "LBRACE", - COMMA: "COMMA", - PERIOD: "PERIOD", - COLON: "COLON", - - RBRACK: "RBRACK", - RBRACE: "RBRACE", -} - -// String returns the string corresponding to the token tok. -func (t Type) String() string { - s := "" - if 0 <= t && t < Type(len(tokens)) { - s = tokens[t] - } - if s == "" { - s = "token(" + strconv.Itoa(int(t)) + ")" - } - return s -} - -// IsIdentifier returns true for tokens corresponding to identifiers and basic -// type literals; it returns false otherwise. -func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } - -// IsLiteral returns true for tokens corresponding to basic type literals; it -// returns false otherwise. -func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } - -// String returns the token's literal text. Note that this is only -// applicable for certain token types, such as token.IDENT, -// token.STRING, etc.. -func (t Token) String() string { - return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) -} - -// HCLToken converts this token to an HCL token. -// -// The token type must be a literal type or this will panic. 
-func (t Token) HCLToken() hcltoken.Token { - switch t.Type { - case BOOL: - return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} - case FLOAT: - return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} - case NULL: - return hcltoken.Token{Type: hcltoken.STRING, Text: ""} - case NUMBER: - return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} - case STRING: - return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} - default: - panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) - } -} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go deleted file mode 100644 index d9993c2928..0000000000 --- a/vendor/github.com/hashicorp/hcl/lex.go +++ /dev/null @@ -1,38 +0,0 @@ -package hcl - -import ( - "unicode" - "unicode/utf8" -) - -type lexModeValue byte - -const ( - lexModeUnknown lexModeValue = iota - lexModeHcl - lexModeJson -) - -// lexMode returns whether we're going to be parsing in JSON -// mode or HCL mode. -func lexMode(v []byte) lexModeValue { - var ( - r rune - w int - offset int - ) - - for { - r, w = utf8.DecodeRune(v[offset:]) - offset += w - if unicode.IsSpace(r) { - continue - } - if r == '{' { - return lexModeJson - } - break - } - - return lexModeHcl -} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go deleted file mode 100644 index 1fca53c4ce..0000000000 --- a/vendor/github.com/hashicorp/hcl/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package hcl - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hclParser "github.com/hashicorp/hcl/hcl/parser" - jsonParser "github.com/hashicorp/hcl/json/parser" -) - -// ParseBytes accepts as input byte slice and returns ast tree. -// -// Input can be either JSON or HCL -func ParseBytes(in []byte) (*ast.File, error) { - return parse(in) -} - -// ParseString accepts input as a string and returns ast tree. -func ParseString(input string) (*ast.File, error) { - return parse([]byte(input)) -} - -func parse(in []byte) (*ast.File, error) { - switch lexMode(in) { - case lexModeHcl: - return hclParser.Parse(in) - case lexModeJson: - return jsonParser.Parse(in) - } - - return nil, fmt.Errorf("unknown config format") -} - -// Parse parses the given input and returns the root object. -// -// The input format can be either HCL or JSON. -func Parse(input string) (*ast.File, error) { - return parse([]byte(input)) -} diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE deleted file mode 100644 index 5f920e9732..0000000000 --- a/vendor/github.com/inconshreveable/mousetrap/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2022 Alan Shreve (@inconshreveable) - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
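Before moving on from the hashicorp/hcl removal: the parse.go hunk deleted above exposed hcl.Parse and hcl.ParseBytes, with lexMode sniffing the first non-space rune to choose between the HCL and JSON parsers. A minimal sketch of how a consumer would have called that API, assuming the canonical import path github.com/hashicorp/hcl rather than the vendored one (the config string is illustrative):

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl"
    )

    func main() {
    	// lexMode sees 'n' rather than '{', so this dispatches to the HCL
    	// parser; input starting with '{' would route to the JSON parser.
    	file, err := hcl.Parse(`name = "ko"`)
    	if err != nil {
    		panic(err)
    	}
    	// For HCL input the root node is an *ast.ObjectList.
    	fmt.Printf("parsed root: %T\n", file.Node)
    }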
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md deleted file mode 100644 index 7a950d1774..0000000000 --- a/vendor/github.com/inconshreveable/mousetrap/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# mousetrap - -mousetrap is a tiny library that answers a single question. - -On a Windows machine, was the process invoked by someone double-clicking on -the executable file while browsing in explorer? - -### Motivation - -Windows developers unfamiliar with command line tools will often "double-click" -the executable for a tool. Because most CLI tools print the help and then exit -when invoked without arguments, this is often very frustrating for those users. - -mousetrap provides a way to detect these invocations so that you can provide -more helpful behavior and instructions on how to run the CLI tool. To see what -this looks like, both from an organizational and a technical perspective, see -https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ - -### The interface - -The library exposes a single interface: - - func StartedByExplorer() bool diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go deleted file mode 100644 index 06a91f0868..0000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !windows -// +build !windows - -package mousetrap - -// StartedByExplorer returns true if the program was invoked by the user -// double-clicking on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It can only tell you -// whether it was launched from explorer.exe -// -// On non-Windows platforms, it always returns false. -func StartedByExplorer() bool { - return false -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go deleted file mode 100644 index 0c56880216..0000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ /dev/null @@ -1,42 +0,0 @@ -package mousetrap - -import ( - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal.
It can only tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(syscall.Getppid()) - if err != nil { - return false - } - return syscall.UTF16ToString(pe.ExeFile[:]) == "explorer.exe" -} diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore deleted file mode 100644 index 5091fb0736..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/jpgo -jmespath-fuzz.zip -cpu.out -go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml deleted file mode 100644 index c56f37c0c9..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go - -sudo: false - -go: - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x - - 1.14.x - - 1.15.x - - tip - -allow_failures: - - go: tip - -script: make build - -matrix: - include: - - language: go - go: 1.15.x - script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE deleted file mode 100644 index b03310a91f..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2015 James Saryerwinnie - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile deleted file mode 100644 index fb38ec2760..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/Makefile +++ /dev/null @@ -1,51 +0,0 @@ - -CMD = jpgo - -SRC_PKGS=./ ./cmd/... ./fuzz/... - -help: - @echo "Please use \`make <target>' where <target> is one of" - @echo " test to run all the tests" - @echo " build to build the library and jp executable" - @echo " generate to run codegen" - - -generate: - go generate ${SRC_PKGS} - -build: - rm -f $(CMD) - go build ${SRC_PKGS} - rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... - mv cmd/$(CMD)/$(CMD) . - -test: test-internal-testify - echo "making tests ${SRC_PKGS}" - go test -v ${SRC_PKGS} - -check: - go vet ${SRC_PKGS} - @echo "golint ${SRC_PKGS}" - @lint=`golint ${SRC_PKGS}`; \ - lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ - echo "$$lint"; \ - if [ "$$lint" != "" ]; then exit 1; fi - -htmlc: - go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov - -buildfuzz: - go-fuzz-build github.com/jmespath/go-jmespath/fuzz - -fuzz: buildfuzz - go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata - -bench: - go test -bench . -cpuprofile cpu.out - -pprof-cpu: - go tool pprof ./go-jmespath.test ./cpu.out - -test-internal-testify: - cd internal/testify && go test ./...
- diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md deleted file mode 100644 index 110ad79997..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# go-jmespath - A JMESPath implementation in Go - -[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) - - - -go-jmespath is a Go implementation of JMESPath, -which is a query language for JSON. It will take a JSON -document and transform it into another JSON document -through a JMESPath expression. - -Using go-jmespath is really easy. There's a single function -you use, `jmespath.Search`: - - ```go -> import "github.com/jmespath/go-jmespath" -> -> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.Search("foo.bar.baz[2]", data) -result = 2 -``` - -In the example we gave the ``Search`` function input data of -`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath -expression `foo.bar.baz[2]`, and the `Search` function evaluated -the expression against the input data to produce the result ``2``. - -The JMESPath language can do a lot more than select an element -from a list. Here are a few more examples: - -```go -> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.Search("foo.bar", data) -result = { "baz": [ 0, 1, 2, 3, 4 ] } - - -> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"}, - {"first": "c", "last": "d"}]}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.Search("foo[*].first", data) -result = [ 'a', 'c' ] - - -> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25}, - {"age": 30}, {"age": 35}, - {"age": 40}]}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.Search("foo[?age > `30`]", data) -result = [ { age: 35 }, { age: 40 } ] -``` - -You can also pre-compile your query. This is useful if -you are going to run multiple searches with it: - -```go - > var jsondata = []byte(`{"foo": "bar"}`) - > var data interface{} - > err := json.Unmarshal(jsondata, &data) - > precompiled, err := jmespath.Compile("foo") - > if err != nil { - > // ... handle the error - > } - > result, err := precompiled.Search(data) - result = "bar" -``` - -## More Resources - -The examples above only show a small amount of what -a JMESPath expression can do. If you want to take a -tour of the language, the *best* place to go is the -[JMESPath Tutorial](http://jmespath.org/tutorial.html). - -One of the best things about JMESPath is that it is -implemented in many different programming languages including -Python, Ruby, PHP, Lua, etc. To see a complete list of libraries, -check out the [JMESPath libraries page](http://jmespath.org/libraries.html). - -And finally, the full JMESPath specification can be found -on the [JMESPath site](http://jmespath.org/specification.html).
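Because the README snippets above are REPL-style fragments rather than compilable Go, here is a minimal, self-contained sketch of the same flow. It relies only on `jmespath.Search`, `jmespath.Compile`, and `(*JMESPath).Search`, whose signatures appear in the vendored `api.go` deleted just below; the input document and the import path (which follows this repository's mirrored vendor path) are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// Illustrative input document, matching the README examples.
	jsondata := []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`)
	var data interface{}
	if err := json.Unmarshal(jsondata, &data); err != nil {
		panic(err)
	}

	// One-shot search: parse and evaluate the expression in a single call.
	result, err := jmespath.Search("foo.bar.baz[2]", data)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // 2

	// Pre-compile when the same expression will be evaluated repeatedly.
	precompiled, err := jmespath.Compile("foo.bar")
	if err != nil {
		panic(err)
	}
	result, err = precompiled.Search(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // map[baz:[0 1 2 3 4]]
}
```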
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go deleted file mode 100644 index 010efe9bfb..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ /dev/null @@ -1,49 +0,0 @@ -package jmespath - -import "strconv" - -// JMESPath is the representation of a compiled JMES path query. A JMESPath is -// safe for concurrent use by multiple goroutines. -type JMESPath struct { - ast ASTNode - intr *treeInterpreter -} - -// Compile parses a JMESPath expression and returns, if successful, a JMESPath -// object that can be used to match against data. -func Compile(expression string) (*JMESPath, error) { - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - jmespath := &JMESPath{ast: ast, intr: newInterpreter()} - return jmespath, nil -} - -// MustCompile is like Compile but panics if the expression cannot be parsed. -// It simplifies safe initialization of global variables holding compiled -// JMESPaths. -func MustCompile(expression string) *JMESPath { - jmespath, err := Compile(expression) - if err != nil { - panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) - } - return jmespath -} - -// Search evaluates a JMESPath expression against input data and returns the result. -func (jp *JMESPath) Search(data interface{}) (interface{}, error) { - return jp.intr.Execute(jp.ast, data) -} - -// Search evaluates a JMESPath expression against input data and returns the result. -func Search(expression string, data interface{}) (interface{}, error) { - intr := newInterpreter() - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - return intr.Execute(ast, data) -} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go deleted file mode 100644 index 1cd2d239c9..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type astNodeType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" - -var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} - -func (i astNodeType) String() string { - if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { - return fmt.Sprintf("astNodeType(%d)", i) - } - return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go deleted file mode 100644 index 9b7cd89b4b..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/functions.go +++ /dev/null @@ -1,842 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "unicode/utf8" -) - -type jpFunction func(arguments []interface{}) (interface{}, error) - -type jpType string - -const ( - jpUnknown jpType = "unknown" - jpNumber jpType = "number" - jpString jpType = "string" - jpArray jpType = "array" 
- jpObject jpType = "object" - jpArrayNumber jpType = "array[number]" - jpArrayString jpType = "array[string]" - jpExpref jpType = "expref" - jpAny jpType = "any" -) - -type functionEntry struct { - name string - arguments []argSpec - handler jpFunction - hasExpRef bool -} - -type argSpec struct { - types []jpType - variadic bool -} - -type byExprString struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprString) Len() int { - return len(a.items) -} -func (a *byExprString) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprString) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(string) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(string) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type byExprFloat struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprFloat) Len() int { - return len(a.items) -} -func (a *byExprFloat) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprFloat) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(float64) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(float64) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type functionCaller struct { - functionTable map[string]functionEntry -} - -func newFunctionCaller() *functionCaller { - caller := &functionCaller{} - caller.functionTable = map[string]functionEntry{ - "length": { - name: "length", - arguments: []argSpec{ - {types: []jpType{jpString, jpArray, jpObject}}, - }, - handler: jpfLength, - }, - "starts_with": { - name: "starts_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfStartsWith, - }, - "abs": { - name: "abs", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfAbs, - }, - "avg": { - name: "avg", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfAvg, - }, - "ceil": { - name: "ceil", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfCeil, - }, - "contains": { - name: "contains", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - {types: []jpType{jpAny}}, - }, - handler: jpfContains, - }, - "ends_with": { - name: "ends_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfEndsWith, - }, - "floor": { - name: "floor", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfFloor, - }, - "map": { - name: "amp", - arguments: []argSpec{ - {types: []jpType{jpExpref}}, - {types: []jpType{jpArray}}, - }, - handler: jpfMap, - hasExpRef: true, - }, - "max": { - name: "max", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMax, - }, - "merge": { - name: "merge", - arguments: []argSpec{ - {types: []jpType{jpObject}, variadic: true}, - }, - handler: jpfMerge, - }, - 
"max_by": { - name: "max_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMaxBy, - hasExpRef: true, - }, - "sum": { - name: "sum", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfSum, - }, - "min": { - name: "min", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMin, - }, - "min_by": { - name: "min_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMinBy, - hasExpRef: true, - }, - "type": { - name: "type", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfType, - }, - "keys": { - name: "keys", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfKeys, - }, - "values": { - name: "values", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfValues, - }, - "sort": { - name: "sort", - arguments: []argSpec{ - {types: []jpType{jpArrayString, jpArrayNumber}}, - }, - handler: jpfSort, - }, - "sort_by": { - name: "sort_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfSortBy, - hasExpRef: true, - }, - "join": { - name: "join", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpArrayString}}, - }, - handler: jpfJoin, - }, - "reverse": { - name: "reverse", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - }, - handler: jpfReverse, - }, - "to_array": { - name: "to_array", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToArray, - }, - "to_string": { - name: "to_string", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToString, - }, - "to_number": { - name: "to_number", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToNumber, - }, - "not_null": { - name: "not_null", - arguments: []argSpec{ - {types: []jpType{jpAny}, variadic: true}, - }, - handler: jpfNotNull, - }, - } - return caller -} - -func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { - if len(e.arguments) == 0 { - return arguments, nil - } - if !e.arguments[len(e.arguments)-1].variadic { - if len(e.arguments) != len(arguments) { - return nil, errors.New("incorrect number of args") - } - for i, spec := range e.arguments { - userArg := arguments[i] - err := spec.typeCheck(userArg) - if err != nil { - return nil, err - } - } - return arguments, nil - } - if len(arguments) < len(e.arguments) { - return nil, errors.New("Invalid arity.") - } - return arguments, nil -} - -func (a *argSpec) typeCheck(arg interface{}) error { - for _, t := range a.types { - switch t { - case jpNumber: - if _, ok := arg.(float64); ok { - return nil - } - case jpString: - if _, ok := arg.(string); ok { - return nil - } - case jpArray: - if isSliceType(arg) { - return nil - } - case jpObject: - if _, ok := arg.(map[string]interface{}); ok { - return nil - } - case jpArrayNumber: - if _, ok := toArrayNum(arg); ok { - return nil - } - case jpArrayString: - if _, ok := toArrayStr(arg); ok { - return nil - } - case jpAny: - return nil - case jpExpref: - if _, ok := arg.(expRef); ok { - return nil - } - } - } - return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) -} - -func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { - entry, ok := f.functionTable[name] - if !ok { - return nil, errors.New("unknown function: " + name) - } - 
resolvedArgs, err := entry.resolveArgs(arguments) - if err != nil { - return nil, err - } - if entry.hasExpRef { - var extra []interface{} - extra = append(extra, intr) - resolvedArgs = append(extra, resolvedArgs...) - } - return entry.handler(resolvedArgs) -} - -func jpfAbs(arguments []interface{}) (interface{}, error) { - num := arguments[0].(float64) - return math.Abs(num), nil -} - -func jpfLength(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if c, ok := arg.(string); ok { - return float64(utf8.RuneCountInString(c)), nil - } else if isSliceType(arg) { - v := reflect.ValueOf(arg) - return float64(v.Len()), nil - } else if c, ok := arg.(map[string]interface{}); ok { - return float64(len(c)), nil - } - return nil, errors.New("could not compute length()") -} - -func jpfStartsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - prefix := arguments[1].(string) - return strings.HasPrefix(search, prefix), nil -} - -func jpfAvg(arguments []interface{}) (interface{}, error) { - // We've already type checked the value so we can safely use - // type assertions. - args := arguments[0].([]interface{}) - length := float64(len(args)) - numerator := 0.0 - for _, n := range args { - numerator += n.(float64) - } - return numerator / length, nil -} -func jpfCeil(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Ceil(val), nil -} -func jpfContains(arguments []interface{}) (interface{}, error) { - search := arguments[0] - el := arguments[1] - if searchStr, ok := search.(string); ok { - if elStr, ok := el.(string); ok { - return strings.Index(searchStr, elStr) != -1, nil - } - return false, nil - } - // Otherwise this is a generic contains for []interface{} - general := search.([]interface{}) - for _, item := range general { - if item == el { - return true, nil - } - } - return false, nil -} -func jpfEndsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - suffix := arguments[1].(string) - return strings.HasSuffix(search, suffix), nil -} -func jpfFloor(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Floor(val), nil -} -func jpfMap(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - exp := arguments[1].(expRef) - node := exp.ref - arr := arguments[2].([]interface{}) - mapped := make([]interface{}, 0, len(arr)) - for _, value := range arr { - current, err := intr.Execute(node, value) - if err != nil { - return nil, err - } - mapped = append(mapped, current) - } - return mapped, nil -} -func jpfMax(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil - } - // Otherwise we're dealing with a max() of strings. 
- items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil -} -func jpfMerge(arguments []interface{}) (interface{}, error) { - final := make(map[string]interface{}) - for _, m := range arguments { - mapped := m.(map[string]interface{}) - for key, value := range mapped { - final[key] = value - } - } - return final, nil -} -func jpfMaxBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - switch t := start.(type) { - case float64: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - case string: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - default: - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfSum(arguments []interface{}) (interface{}, error) { - items, _ := toArrayNum(arguments[0]) - sum := 0.0 - for _, item := range items { - sum += item - } - return sum, nil -} - -func jpfMin(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil - } - items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil -} - -func jpfMinBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if t, ok := start.(float64); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current < bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else if t, ok := start.(string); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current < 
bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else { - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfType(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if _, ok := arg.(float64); ok { - return "number", nil - } - if _, ok := arg.(string); ok { - return "string", nil - } - if _, ok := arg.([]interface{}); ok { - return "array", nil - } - if _, ok := arg.(map[string]interface{}); ok { - return "object", nil - } - if arg == nil { - return "null", nil - } - if arg == true || arg == false { - return "boolean", nil - } - return nil, errors.New("unknown type") -} -func jpfKeys(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for key := range arg { - collected = append(collected, key) - } - return collected, nil -} -func jpfValues(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for _, value := range arg { - collected = append(collected, value) - } - return collected, nil -} -func jpfSort(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - d := sort.Float64Slice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil - } - // Otherwise we're dealing with sort()'ing strings. - items, _ := toArrayStr(arguments[0]) - d := sort.StringSlice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil -} -func jpfSortBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return arr, nil - } else if len(arr) == 1 { - return arr, nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if _, ok := start.(float64); ok { - sortable := &byExprFloat{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else if _, ok := start.(string); ok { - sortable := &byExprString{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else { - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfJoin(arguments []interface{}) (interface{}, error) { - sep := arguments[0].(string) - // We can't just do arguments[1].([]string), we have to - // manually convert each item to a string. 
- arrayStr := []string{} - for _, item := range arguments[1].([]interface{}) { - arrayStr = append(arrayStr, item.(string)) - } - return strings.Join(arrayStr, sep), nil -} -func jpfReverse(arguments []interface{}) (interface{}, error) { - if s, ok := arguments[0].(string); ok { - r := []rune(s) - for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r), nil - } - items := arguments[0].([]interface{}) - length := len(items) - reversed := make([]interface{}, length) - for i, item := range items { - reversed[length-(i+1)] = item - } - return reversed, nil -} -func jpfToArray(arguments []interface{}) (interface{}, error) { - if _, ok := arguments[0].([]interface{}); ok { - return arguments[0], nil - } - return arguments[:1:1], nil -} -func jpfToString(arguments []interface{}) (interface{}, error) { - if v, ok := arguments[0].(string); ok { - return v, nil - } - result, err := json.Marshal(arguments[0]) - if err != nil { - return nil, err - } - return string(result), nil -} -func jpfToNumber(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if v, ok := arg.(float64); ok { - return v, nil - } - if v, ok := arg.(string); ok { - conv, err := strconv.ParseFloat(v, 64) - if err != nil { - return nil, nil - } - return conv, nil - } - if _, ok := arg.([]interface{}); ok { - return nil, nil - } - if _, ok := arg.(map[string]interface{}); ok { - return nil, nil - } - if arg == nil { - return nil, nil - } - if arg == true || arg == false { - return nil, nil - } - return nil, errors.New("unknown type") -} -func jpfNotNull(arguments []interface{}) (interface{}, error) { - for _, arg := range arguments { - if arg != nil { - return arg, nil - } - } - return nil, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go deleted file mode 100644 index 13c74604c2..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/interpreter.go +++ /dev/null @@ -1,418 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" - "unicode" - "unicode/utf8" -) - -/* This is a tree based interpreter. It walks the AST and directly - interprets the AST to search through a JSON document. -*/ - -type treeInterpreter struct { - fCall *functionCaller -} - -func newInterpreter() *treeInterpreter { - interpreter := treeInterpreter{} - interpreter.fCall = newFunctionCaller() - return &interpreter -} - -type expRef struct { - ref ASTNode -} - -// Execute takes an ASTNode and input data and interprets the AST directly. -// It will produce the result of applying the JMESPath expression associated -// with the ASTNode to the input data "value". 
-func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { - switch node.nodeType { - case ASTComparator: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - right, err := intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - switch node.value { - case tEQ: - return objsEqual(left, right), nil - case tNE: - return !objsEqual(left, right), nil - } - leftNum, ok := left.(float64) - if !ok { - return nil, nil - } - rightNum, ok := right.(float64) - if !ok { - return nil, nil - } - switch node.value { - case tGT: - return leftNum > rightNum, nil - case tGTE: - return leftNum >= rightNum, nil - case tLT: - return leftNum < rightNum, nil - case tLTE: - return leftNum <= rightNum, nil - } - case ASTExpRef: - return expRef{ref: node.children[0]}, nil - case ASTFunctionExpression: - resolvedArgs := []interface{}{} - for _, arg := range node.children { - current, err := intr.Execute(arg, value) - if err != nil { - return nil, err - } - resolvedArgs = append(resolvedArgs, current) - } - return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) - case ASTField: - if m, ok := value.(map[string]interface{}); ok { - key := node.value.(string) - return m[key], nil - } - return intr.fieldFromStruct(node.value.(string), value) - case ASTFilterProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.filterProjectionWithReflection(node, left) - } - return nil, nil - } - compareNode := node.children[2] - collected := []interface{}{} - for _, element := range sliceType { - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil - case ASTFlatten: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - // If we can't type convert to []interface{}, there's - // a chance this could still work via reflection if we're - // dealing with user provided types. - if isSliceType(left) { - return intr.flattenWithReflection(left) - } - return nil, nil - } - flattened := []interface{}{} - for _, element := range sliceType { - if elementSlice, ok := element.([]interface{}); ok { - flattened = append(flattened, elementSlice...) - } else if isSliceType(element) { - reflectFlat := []interface{}{} - v := reflect.ValueOf(element) - for i := 0; i < v.Len(); i++ { - reflectFlat = append(reflectFlat, v.Index(i).Interface()) - } - flattened = append(flattened, reflectFlat...) - } else { - flattened = append(flattened, element) - } - } - return flattened, nil - case ASTIdentity, ASTCurrentNode: - return value, nil - case ASTIndex: - if sliceType, ok := value.([]interface{}); ok { - index := node.value.(int) - if index < 0 { - index += len(sliceType) - } - if index < len(sliceType) && index >= 0 { - return sliceType[index], nil - } - return nil, nil - } - // Otherwise try via reflection. 
- rv := reflect.ValueOf(value) - if rv.Kind() == reflect.Slice { - index := node.value.(int) - if index < 0 { - index += rv.Len() - } - if index < rv.Len() && index >= 0 { - v := rv.Index(index) - return v.Interface(), nil - } - } - return nil, nil - case ASTKeyValPair: - return intr.Execute(node.children[0], value) - case ASTLiteral: - return node.value, nil - case ASTMultiSelectHash: - if value == nil { - return nil, nil - } - collected := make(map[string]interface{}) - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - key := child.value.(string) - collected[key] = current - } - return collected, nil - case ASTMultiSelectList: - if value == nil { - return nil, nil - } - collected := []interface{}{} - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - collected = append(collected, current) - } - return collected, nil - case ASTOrExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - matched, err = intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - } - return matched, nil - case ASTAndExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return matched, nil - } - return intr.Execute(node.children[1], value) - case ASTNotExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return true, nil - } - return false, nil - case ASTPipe: - result := value - var err error - for _, child := range node.children { - result, err = intr.Execute(child, result) - if err != nil { - return nil, err - } - } - return result, nil - case ASTProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.projectWithReflection(node, left) - } - return nil, nil - } - collected := []interface{}{} - var current interface{} - for _, element := range sliceType { - current, err = intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, nil - case ASTSubexpression, ASTIndexExpression: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - return intr.Execute(node.children[1], left) - case ASTSlice: - sliceType, ok := value.([]interface{}) - if !ok { - if isSliceType(value) { - return intr.sliceWithReflection(node, value) - } - return nil, nil - } - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - return slice(sliceType, sliceParams) - case ASTValueProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - mapType, ok := left.(map[string]interface{}) - if !ok { - return nil, nil - } - values := make([]interface{}, len(mapType)) - for _, value := range mapType { - values = append(values, value) - } - collected := []interface{}{} - for _, element := range values { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, 
nil - } - return nil, errors.New("Unknown AST node: " + node.nodeType.String()) -} - -func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { - rv := reflect.ValueOf(value) - first, n := utf8.DecodeRuneInString(key) - fieldName := string(unicode.ToUpper(first)) + key[n:] - if rv.Kind() == reflect.Struct { - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } else if rv.Kind() == reflect.Ptr { - // Handle multiple levels of indirection? - if rv.IsNil() { - return nil, nil - } - rv = rv.Elem() - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } - return nil, nil -} - -func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - flattened := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - if reflect.TypeOf(element).Kind() == reflect.Slice { - // Then insert the contents of the element - // slice into the flattened slice, - // i.e flattened = append(flattened, mySlice...) - elementV := reflect.ValueOf(element) - for j := 0; j < elementV.Len(); j++ { - flattened = append( - flattened, elementV.Index(j).Interface()) - } - } else { - flattened = append(flattened, element) - } - } - return flattened, nil -} - -func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - final := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - final = append(final, element) - } - return slice(final, sliceParams) -} - -func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { - compareNode := node.children[2] - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil -} - -func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if result != nil { - collected = append(collected, result) - } - } - return collected, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go deleted file mode 100644 index 817900c8f5..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/lexer.go +++ /dev/null @@ -1,420 +0,0 @@ -package jmespath - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -type token struct { - tokenType tokType - value string - position int - length int -} - -type tokType int - -const eof = -1 - -// Lexer contains information about the expression being tokenized. 
-type Lexer struct { - expression string // The expression provided by the user. - currentPos int // The current position in the string. - lastWidth int // The width of the current rune. This - buf bytes.Buffer // Internal buffer used for building up values. -} - -// SyntaxError is the main error used whenever a lexing or parsing error occurs. -type SyntaxError struct { - msg string // Error message displayed to user - Expression string // Expression that generated a SyntaxError - Offset int // The location in the string where the error occurred -} - -func (e SyntaxError) Error() string { - // In the future, it would be good to underline the specific - // location where the error occurred. - return "SyntaxError: " + e.msg -} - -// HighlightLocation will show where the syntax error occurred. -// It will place a "^" character on a line below the expression -// at the point where the syntax error occurred. -func (e SyntaxError) HighlightLocation() string { - return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" -} - -//go:generate stringer -type=tokType -const ( - tUnknown tokType = iota - tStar - tDot - tFilter - tFlatten - tLparen - tRparen - tLbracket - tRbracket - tLbrace - tRbrace - tOr - tPipe - tNumber - tUnquotedIdentifier - tQuotedIdentifier - tComma - tColon - tLT - tLTE - tGT - tGTE - tEQ - tNE - tJSONLiteral - tStringLiteral - tCurrent - tExpref - tAnd - tNot - tEOF -) - -var basicTokens = map[rune]tokType{ - '.': tDot, - '*': tStar, - ',': tComma, - ':': tColon, - '{': tLbrace, - '}': tRbrace, - ']': tRbracket, // tLbracket not included because it could be "[]" - '(': tLparen, - ')': tRparen, - '@': tCurrent, -} - -// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. -// When using this bitmask just be sure to shift the rune down 64 bits -// before checking against identifierStartBits. -const identifierStartBits uint64 = 576460745995190270 - -// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. -var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} - -var whiteSpace = map[rune]bool{ - ' ': true, '\t': true, '\n': true, '\r': true, -} - -func (t token) String() string { - return fmt.Sprintf("Token{%+v, %s, %d, %d}", - t.tokenType, t.value, t.position, t.length) -} - -// NewLexer creates a new JMESPath lexer. -func NewLexer() *Lexer { - lexer := Lexer{} - return &lexer -} - -func (lexer *Lexer) next() rune { - if lexer.currentPos >= len(lexer.expression) { - lexer.lastWidth = 0 - return eof - } - r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) - lexer.lastWidth = w - lexer.currentPos += w - return r -} - -func (lexer *Lexer) back() { - lexer.currentPos -= lexer.lastWidth -} - -func (lexer *Lexer) peek() rune { - t := lexer.next() - lexer.back() - return t -} - -// tokenize takes an expression and returns corresponding tokens. -func (lexer *Lexer) tokenize(expression string) ([]token, error) { - var tokens []token - lexer.expression = expression - lexer.currentPos = 0 - lexer.lastWidth = 0 -loop: - for { - r := lexer.next() - if identifierStartBits&(1<<(uint64(r)-64)) > 0 { - t := lexer.consumeUnquotedIdentifier() - tokens = append(tokens, t) - } else if val, ok := basicTokens[r]; ok { - // Basic single char token. 
- t := token{ - tokenType: val, - value: string(r), - position: lexer.currentPos - lexer.lastWidth, - length: 1, - } - tokens = append(tokens, t) - } else if r == '-' || (r >= '0' && r <= '9') { - t := lexer.consumeNumber() - tokens = append(tokens, t) - } else if r == '[' { - t := lexer.consumeLBracket() - tokens = append(tokens, t) - } else if r == '"' { - t, err := lexer.consumeQuotedIdentifier() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '\'' { - t, err := lexer.consumeRawStringLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '`' { - t, err := lexer.consumeLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '|' { - t := lexer.matchOrElse(r, '|', tOr, tPipe) - tokens = append(tokens, t) - } else if r == '<' { - t := lexer.matchOrElse(r, '=', tLTE, tLT) - tokens = append(tokens, t) - } else if r == '>' { - t := lexer.matchOrElse(r, '=', tGTE, tGT) - tokens = append(tokens, t) - } else if r == '!' { - t := lexer.matchOrElse(r, '=', tNE, tNot) - tokens = append(tokens, t) - } else if r == '=' { - t := lexer.matchOrElse(r, '=', tEQ, tUnknown) - tokens = append(tokens, t) - } else if r == '&' { - t := lexer.matchOrElse(r, '&', tAnd, tExpref) - tokens = append(tokens, t) - } else if r == eof { - break loop - } else if _, ok := whiteSpace[r]; ok { - // Ignore whitespace - } else { - return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) - } - } - tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) - return tokens, nil -} - -// Consume characters until the ending rune "r" is reached. -// If the end of the expression is reached before seeing the -// terminating rune "r", then an error is returned. -// If no error occurs then the matching substring is returned. -// The returned string will not include the ending rune. -func (lexer *Lexer) consumeUntil(end rune) (string, error) { - start := lexer.currentPos - current := lexer.next() - for current != end && current != eof { - if current == '\\' && lexer.peek() != eof { - lexer.next() - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. - return "", SyntaxError{ - msg: "Unclosed delimiter: " + string(end), - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil -} - -func (lexer *Lexer) consumeLiteral() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('`') - if err != nil { - return token{}, err - } - value = strings.Replace(value, "\\`", "`", -1) - return token{ - tokenType: tJSONLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) consumeRawStringLiteral() (token, error) { - start := lexer.currentPos - currentIndex := start - current := lexer.next() - for current != '\'' && lexer.peek() != eof { - if current == '\\' && lexer.peek() == '\'' { - chunk := lexer.expression[currentIndex : lexer.currentPos-1] - lexer.buf.WriteString(chunk) - lexer.buf.WriteString("'") - lexer.next() - currentIndex = lexer.currentPos - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. 
- return token{}, SyntaxError{ - msg: "Unclosed delimiter: '", - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - if currentIndex < lexer.currentPos { - lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) - } - value := lexer.buf.String() - // Reset the buffer so it can reused again. - lexer.buf.Reset() - return token{ - tokenType: tStringLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: lexer.expression, - Offset: lexer.currentPos - 1, - } -} - -// Checks for a two char token, otherwise matches a single character -// token. This is used whenever a two char token overlaps a single -// char token, e.g. "||" -> tPipe, "|" -> tOr. -func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == second { - t = token{ - tokenType: matchedType, - value: string(first) + string(second), - position: start, - length: 2, - } - } else { - lexer.back() - t = token{ - tokenType: singleCharType, - value: string(first), - position: start, - length: 1, - } - } - return t -} - -func (lexer *Lexer) consumeLBracket() token { - // There's three options here: - // 1. A filter expression "[?" - // 2. A flatten operator "[]" - // 3. A bare rbracket "[" - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == '?' { - t = token{ - tokenType: tFilter, - value: "[?", - position: start, - length: 2, - } - } else if nextRune == ']' { - t = token{ - tokenType: tFlatten, - value: "[]", - position: start, - length: 2, - } - } else { - t = token{ - tokenType: tLbracket, - value: "[", - position: start, - length: 1, - } - lexer.back() - } - return t -} - -func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('"') - if err != nil { - return token{}, err - } - var decoded string - asJSON := []byte("\"" + value + "\"") - if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { - return token{}, err - } - return token{ - tokenType: tQuotedIdentifier, - value: decoded, - position: start - 1, - length: len(decoded), - }, nil -} - -func (lexer *Lexer) consumeUnquotedIdentifier() token { - // Consume runes until we reach the end of an unquoted - // identifier. - start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tUnquotedIdentifier, - value: value, - position: start, - length: lexer.currentPos - start, - } -} - -func (lexer *Lexer) consumeNumber() token { - // Consume runes until we reach something that's not a number. 
- start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < '0' || r > '9' { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tNumber, - value: value, - position: start, - length: lexer.currentPos - start, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go deleted file mode 100644 index 4abc303ab4..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ /dev/null @@ -1,603 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -type astNodeType int - -//go:generate stringer -type astNodeType -const ( - ASTEmpty astNodeType = iota - ASTComparator - ASTCurrentNode - ASTExpRef - ASTFunctionExpression - ASTField - ASTFilterProjection - ASTFlatten - ASTIdentity - ASTIndex - ASTIndexExpression - ASTKeyValPair - ASTLiteral - ASTMultiSelectHash - ASTMultiSelectList - ASTOrExpression - ASTAndExpression - ASTNotExpression - ASTPipe - ASTProjection - ASTSubexpression - ASTSlice - ASTValueProjection -) - -// ASTNode represents the abstract syntax tree of a JMESPath expression. -type ASTNode struct { - nodeType astNodeType - value interface{} - children []ASTNode -} - -func (node ASTNode) String() string { - return node.PrettyPrint(0) -} - -// PrettyPrint will pretty print the parsed AST. -// The AST is an implementation detail and this pretty print -// function is provided as a convenience method to help with -// debugging. You should not rely on its output as the internal -// structure of the AST may change at any time. -func (node ASTNode) PrettyPrint(indent int) string { - spaces := strings.Repeat(" ", indent) - output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) - nextIndent := indent + 2 - if node.value != nil { - if converted, ok := node.value.(fmt.Stringer); ok { - // Account for things like comparator nodes - // that are enums with a String() method. - output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) - } else { - output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) - } - } - lastIndex := len(node.children) - if lastIndex > 0 { - output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) - childIndent := nextIndent + 2 - for _, elem := range node.children { - output += elem.PrettyPrint(childIndent) - } - } - output += fmt.Sprintf("%s}\n", spaces) - return output -} - -var bindingPowers = map[tokType]int{ - tEOF: 0, - tUnquotedIdentifier: 0, - tQuotedIdentifier: 0, - tRbracket: 0, - tRparen: 0, - tComma: 0, - tRbrace: 0, - tNumber: 0, - tCurrent: 0, - tExpref: 0, - tColon: 0, - tPipe: 1, - tOr: 2, - tAnd: 3, - tEQ: 5, - tLT: 5, - tLTE: 5, - tGT: 5, - tGTE: 5, - tNE: 5, - tFlatten: 9, - tStar: 20, - tFilter: 21, - tDot: 40, - tNot: 45, - tLbrace: 50, - tLbracket: 55, - tLparen: 60, -} - -// Parser holds state about the current expression being parsed. -type Parser struct { - expression string - tokens []token - index int -} - -// NewParser creates a new JMESPath parser. -func NewParser() *Parser { - p := Parser{} - return &p -} - -// Parse will compile a JMESPath expression. 
-func (p *Parser) Parse(expression string) (ASTNode, error) { - lexer := NewLexer() - p.expression = expression - p.index = 0 - tokens, err := lexer.tokenize(expression) - if err != nil { - return ASTNode{}, err - } - p.tokens = tokens - parsed, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() != tEOF { - return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expression: %s", p.current())) - } - return parsed, nil -} - -func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { - var err error - leftToken := p.lookaheadToken(0) - p.advance() - leftNode, err := p.nud(leftToken) - if err != nil { - return ASTNode{}, err - } - currentToken := p.current() - for bindingPower < bindingPowers[currentToken] { - p.advance() - leftNode, err = p.led(currentToken, leftNode) - if err != nil { - return ASTNode{}, err - } - currentToken = p.current() - } - return leftNode, nil -} - -func (p *Parser) parseIndexExpression() (ASTNode, error) { - if p.lookahead(0) == tColon || p.lookahead(1) == tColon { - return p.parseSliceExpression() - } - indexStr := p.lookaheadToken(0).value - parsedInt, err := strconv.Atoi(indexStr) - if err != nil { - return ASTNode{}, err - } - indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} - p.advance() - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return indexNode, nil -} - -func (p *Parser) parseSliceExpression() (ASTNode, error) { - parts := []*int{nil, nil, nil} - index := 0 - current := p.current() - for current != tRbracket && index < 3 { - if current == tColon { - index++ - p.advance() - } else if current == tNumber { - parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) - if err != nil { - return ASTNode{}, err - } - parts[index] = &parsedInt - p.advance() - } else { - return ASTNode{}, p.syntaxError( - "Expected tColon or tNumber" + ", received: " + p.current().String()) - } - current = p.current() - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTSlice, - value: parts, - }, nil -} - -func (p *Parser) match(tokenType tokType) error { - if p.current() == tokenType { - p.advance() - return nil - } - return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) -} - -func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { - switch tokenType { - case tDot: - if p.current() != tStar { - right, err := p.parseDotRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTSubexpression, - children: []ASTNode{node, right}, - }, err - } - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTValueProjection, - children: []ASTNode{node, right}, - }, err - case tPipe: - right, err := p.parseExpression(bindingPowers[tPipe]) - return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err - case tOr: - right, err := p.parseExpression(bindingPowers[tOr]) - return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err - case tAnd: - right, err := p.parseExpression(bindingPowers[tAnd]) - return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err - case tLparen: - name := node.value - var args []ASTNode - for p.current() != tRparen { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() == tComma { - if err := p.match(tComma); err != nil { - return ASTNode{}, err - } - } - args = append(args, expression) - } - 
if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTFunctionExpression, - value: name, - children: args, - }, nil - case tFilter: - return p.parseFilter(node) - case tFlatten: - left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{left, right}, - }, err - case tEQ, tNE, tGT, tGTE, tLT, tLTE: - right, err := p.parseExpression(bindingPowers[tokenType]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTComparator, - value: tokenType, - children: []ASTNode{node, right}, - }, nil - case tLbracket: - tokenType := p.current() - var right ASTNode - var err error - if tokenType == tNumber || tokenType == tColon { - right, err = p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(node, right) - } - // Otherwise this is a projection. - if err := p.match(tStar); err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{node, right}, - }, nil - } - return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) -} - -func (p *Parser) nud(token token) (ASTNode, error) { - switch token.tokenType { - case tJSONLiteral: - var parsed interface{} - err := json.Unmarshal([]byte(token.value), &parsed) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTLiteral, value: parsed}, nil - case tStringLiteral: - return ASTNode{nodeType: ASTLiteral, value: token.value}, nil - case tUnquotedIdentifier: - return ASTNode{ - nodeType: ASTField, - value: token.value, - }, nil - case tQuotedIdentifier: - node := ASTNode{nodeType: ASTField, value: token.value} - if p.current() == tLparen { - return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) - } - return node, nil - case tStar: - left := ASTNode{nodeType: ASTIdentity} - var right ASTNode - var err error - if p.current() == tRbracket { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - } - return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err - case tFilter: - return p.parseFilter(ASTNode{nodeType: ASTIdentity}) - case tLbrace: - return p.parseMultiSelectHash() - case tFlatten: - left := ASTNode{ - nodeType: ASTFlatten, - children: []ASTNode{{nodeType: ASTIdentity}}, - } - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil - case tLbracket: - tokenType := p.current() - //var right ASTNode - if tokenType == tNumber || tokenType == tColon { - right, err := p.parseIndexExpression() - if err != nil { - return ASTNode{}, nil - } - return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) - } else if tokenType == tStar && p.lookahead(1) == tRbracket { - p.advance() - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{{nodeType: ASTIdentity}, right}, - }, nil - } else { - return p.parseMultiSelectList() - } - case tCurrent: - return ASTNode{nodeType: 
ASTCurrentNode}, nil - case tExpref: - expression, err := p.parseExpression(bindingPowers[tExpref]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil - case tNot: - expression, err := p.parseExpression(bindingPowers[tNot]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil - case tLparen: - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return expression, nil - case tEOF: - return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) - } - - return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) -} - -func (p *Parser) parseMultiSelectList() (ASTNode, error) { - var expressions []ASTNode - for { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - expressions = append(expressions, expression) - if p.current() == tRbracket { - break - } - err = p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } - err := p.match(tRbracket) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTMultiSelectList, - children: expressions, - }, nil -} - -func (p *Parser) parseMultiSelectHash() (ASTNode, error) { - var children []ASTNode - for { - keyToken := p.lookaheadToken(0) - if err := p.match(tUnquotedIdentifier); err != nil { - if err := p.match(tQuotedIdentifier); err != nil { - return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") - } - } - keyName := keyToken.value - err := p.match(tColon) - if err != nil { - return ASTNode{}, err - } - value, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - node := ASTNode{ - nodeType: ASTKeyValPair, - value: keyName, - children: []ASTNode{value}, - } - children = append(children, node) - if p.current() == tComma { - err := p.match(tComma) - if err != nil { - return ASTNode{}, nil - } - } else if p.current() == tRbrace { - err := p.match(tRbrace) - if err != nil { - return ASTNode{}, nil - } - break - } - } - return ASTNode{ - nodeType: ASTMultiSelectHash, - children: children, - }, nil -} - -func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { - indexExpr := ASTNode{ - nodeType: ASTIndexExpression, - children: []ASTNode{left, right}, - } - if right.nodeType == ASTSlice { - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{indexExpr, right}, - }, err - } - return indexExpr, nil -} -func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { - var right, condition ASTNode - var err error - condition, err = p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - if p.current() == tFlatten { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tFilter]) - if err != nil { - return ASTNode{}, err - } - } - - return ASTNode{ - nodeType: ASTFilterProjection, - children: []ASTNode{node, right, condition}, - }, nil -} - -func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { - lookahead := p.current() - if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { - return p.parseExpression(bindingPower) - } else if lookahead == tLbracket { - if err := p.match(tLbracket); err != nil 
{ - return ASTNode{}, err - } - return p.parseMultiSelectList() - } else if lookahead == tLbrace { - if err := p.match(tLbrace); err != nil { - return ASTNode{}, err - } - return p.parseMultiSelectHash() - } - return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") -} - -func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { - current := p.current() - if bindingPowers[current] < 10 { - return ASTNode{nodeType: ASTIdentity}, nil - } else if current == tLbracket { - return p.parseExpression(bindingPower) - } else if current == tFilter { - return p.parseExpression(bindingPower) - } else if current == tDot { - err := p.match(tDot) - if err != nil { - return ASTNode{}, err - } - return p.parseDotRHS(bindingPower) - } else { - return ASTNode{}, p.syntaxError("Error") - } -} - -func (p *Parser) lookahead(number int) tokType { - return p.lookaheadToken(number).tokenType -} - -func (p *Parser) current() tokType { - return p.lookahead(0) -} - -func (p *Parser) lookaheadToken(number int) token { - return p.tokens[p.index+number] -} - -func (p *Parser) advance() { - p.index++ -} - -func tokensOneOf(elements []tokType, token tokType) bool { - for _, elem := range elements { - if elem == token { - return true - } - } - return false -} - -func (p *Parser) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: p.lookaheadToken(0).position, - } -} - -// Create a SyntaxError based on the provided token. -// This differs from syntaxError() which creates a SyntaxError -// based on the current lookahead token. -func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: t.position, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go deleted file mode 100644 index dae79cbdf3..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/toktype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=tokType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" - -var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} - -func (i tokType) String() string { - if i < 0 || i >= tokType(len(_tokType_index)-1) { - return fmt.Sprintf("tokType(%d)", i) - } - return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go deleted file mode 100644 index ddc1b7d7d4..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/util.go +++ /dev/null @@ -1,185 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" -) - -// IsFalse determines if an object is false based on the JMESPath spec. -// JMESPath defines false values to be any of: -// - An empty string array, or hash. -// - The boolean value false. 
-// - nil -func isFalse(value interface{}) bool { - switch v := value.(type) { - case bool: - return !v - case []interface{}: - return len(v) == 0 - case map[string]interface{}: - return len(v) == 0 - case string: - return len(v) == 0 - case nil: - return true - } - // Try the reflection cases before returning false. - rv := reflect.ValueOf(value) - switch rv.Kind() { - case reflect.Struct: - // A struct type will never be false, even if - // all of its values are the zero type. - return false - case reflect.Slice, reflect.Map: - return rv.Len() == 0 - case reflect.Ptr: - if rv.IsNil() { - return true - } - // If it's a pointer type, we'll try to deref the pointer - // and evaluate the pointer value for isFalse. - element := rv.Elem() - return isFalse(element.Interface()) - } - return false -} - -// ObjsEqual is a generic object equality check. -// It will take two arbitrary objects and recursively determine -// if they are equal. -func objsEqual(left interface{}, right interface{}) bool { - return reflect.DeepEqual(left, right) -} - -// SliceParam refers to a single part of a slice. -// A slice consists of a start, a stop, and a step, similar to -// python slices. -type sliceParam struct { - N int - Specified bool -} - -// Slice supports [start:stop:step] style slicing that's supported in JMESPath. -func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { - computed, err := computeSliceParams(len(slice), parts) - if err != nil { - return nil, err - } - start, stop, step := computed[0], computed[1], computed[2] - result := []interface{}{} - if step > 0 { - for i := start; i < stop; i += step { - result = append(result, slice[i]) - } - } else { - for i := start; i > stop; i += step { - result = append(result, slice[i]) - } - } - return result, nil -} - -func computeSliceParams(length int, parts []sliceParam) ([]int, error) { - var start, stop, step int - if !parts[2].Specified { - step = 1 - } else if parts[2].N == 0 { - return nil, errors.New("Invalid slice, step cannot be 0") - } else { - step = parts[2].N - } - var stepValueNegative bool - if step < 0 { - stepValueNegative = true - } else { - stepValueNegative = false - } - - if !parts[0].Specified { - if stepValueNegative { - start = length - 1 - } else { - start = 0 - } - } else { - start = capSlice(length, parts[0].N, step) - } - - if !parts[1].Specified { - if stepValueNegative { - stop = -1 - } else { - stop = length - } - } else { - stop = capSlice(length, parts[1].N, step) - } - return []int{start, stop, step}, nil -} - -func capSlice(length int, actual int, step int) int { - if actual < 0 { - actual += length - if actual < 0 { - if step < 0 { - actual = -1 - } else { - actual = 0 - } - } - } else if actual >= length { - if step < 0 { - actual = length - 1 - } else { - actual = length - } - } - return actual -} - -// ToArrayNum converts an empty interface type to a slice of float64. -// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. -func toArrayNum(data interface{}) ([]float64, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]float64, len(d)) - for i, el := range d { - item, ok := el.(float64) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -// ToArrayStr converts an empty interface type to a slice of strings. 
-// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. If the input data could be entirely -// converted, then the converted data, along with a second value of true, -// will be returned. -func toArrayStr(data interface{}) ([]string, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]string, len(d)) - for i, el := range d { - item, ok := el.(string) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -func isSliceType(v interface{}) bool { - if v == nil { - return false - } - return reflect.TypeOf(v).Kind() == reflect.Slice -} diff --git a/vendor/github.com/josharian/intern/README.md b/vendor/github.com/josharian/intern/README.md deleted file mode 100644 index ffc44b219b..0000000000 --- a/vendor/github.com/josharian/intern/README.md +++ /dev/null @@ -1,5 +0,0 @@ -Docs: https://godoc.org/github.com/josharian/intern - -See also [Go issue 5160](https://golang.org/issue/5160). - -License: MIT diff --git a/vendor/github.com/josharian/intern/intern.go b/vendor/github.com/josharian/intern/intern.go deleted file mode 100644 index 7acb1fe90a..0000000000 --- a/vendor/github.com/josharian/intern/intern.go +++ /dev/null @@ -1,44 +0,0 @@ -// Package intern interns strings. -// Interning is best effort only. -// Interned strings may be removed automatically -// at any time without notification. -// All functions may be called concurrently -// with themselves and each other. -package intern - -import "sync" - -var ( - pool sync.Pool = sync.Pool{ - New: func() interface{} { - return make(map[string]string) - }, - } -) - -// String returns s, interned. -func String(s string) string { - m := pool.Get().(map[string]string) - c, ok := m[s] - if ok { - pool.Put(m) - return c - } - m[s] = s - pool.Put(m) - return s -} - -// Bytes returns b converted to a string, interned. -func Bytes(b []byte) string { - m := pool.Get().(map[string]string) - c, ok := m[string(b)] - if ok { - pool.Put(m) - return c - } - s := string(b) - m[s] = s - pool.Put(m) - return s -} diff --git a/vendor/github.com/josharian/intern/license.md b/vendor/github.com/josharian/intern/license.md deleted file mode 100644 index 353d3055f0..0000000000 --- a/vendor/github.com/josharian/intern/license.md +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2019 Josh Bleecher Snyder - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes deleted file mode 100644 index 402433593c..0000000000 --- a/vendor/github.com/klauspost/compress/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -* -text -*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore deleted file mode 100644 index d31b378152..0000000000 --- a/vendor/github.com/klauspost/compress/.gitignore +++ /dev/null @@ -1,32 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -/s2/cmd/_s2sx/sfx-exe - -# Linux perf files -perf.data -perf.data.old - -# gdb history -.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml deleted file mode 100644 index 4c28dff465..0000000000 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ /dev/null @@ -1,127 +0,0 @@ -# This is an example goreleaser.yaml file with some sane defaults. -# Make sure to check the documentation at http://goreleaser.com -before: - hooks: - - ./gen.sh - - go install mvdan.cc/garble@v0.10.1 - -builds: - - - id: "s2c" - binary: s2c - main: ./s2/cmd/s2c/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - gobinary: garble - - - id: "s2d" - binary: s2d - main: ./s2/cmd/s2d/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - gobinary: garble - - - id: "s2sx" - binary: s2sx - main: ./s2/cmd/_s2sx/main.go - flags: - - -modfile=s2sx.mod - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - gobinary: garble - -archives: - - - id: s2-binaries - name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" - format_overrides: - - goos: windows - format: zip - files: - - unpack/* - - s2/LICENSE - - s2/README.md -checksum: - name_template: 'checksums.txt' -snapshot: - name_template: "{{ .Tag }}-next" -changelog: - sort: asc - filters: - exclude: - - '^doc:' - - '^docs:' - - '^test:' - - '^tests:' - - '^Update\sREADME.md' - -nfpms: - - - file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" - vendor: Klaus Post - homepage: https://github.com/klauspost/compress - maintainer: Klaus Post - description: S2 Compression Tool - license: BSD 3-Clause - formats: - - deb - - rpm diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE deleted file mode 
100644 index 87d5574777..0000000000 --- a/vendor/github.com/klauspost/compress/LICENSE +++ /dev/null @@ -1,304 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------- - -Files: gzhttp/* - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-2017 The New York Times Company - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md deleted file mode 100644 index 43de486775..0000000000 --- a/vendor/github.com/klauspost/compress/README.md +++ /dev/null @@ -1,661 +0,0 @@ -# compress - -This package provides various compression algorithms. - -* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. 
-* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). -* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. -* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. -* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. -* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. - -[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) -[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) - -# changelog - -* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) - * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 - * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 - * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 - * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 - * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 - * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - -* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) - * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 - * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 - -* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) - * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 - * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 - * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 - * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 - -* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) - * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 - * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 - -* Apr 5, 2023 - 
[v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) - * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 - * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 - * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 - * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 - * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 - * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 - -* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) - * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 - * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 - * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 - * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 - * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 - -* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) - * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 - * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 - * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 - * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 - * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 - * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 - 
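The v1.16.x entries above revolve around the S2 codec. For orientation, a minimal sketch of its block API, assuming the canonical import path `github.com/klauspost/compress/s2` (the `github.com` hosts elsewhere in this diff are mirrored links):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := bytes.Repeat([]byte("hello, s2! "), 100)

	// Encode appends the compressed block to dst; nil allocates a new slice.
	compressed := s2.Encode(nil, src)

	// Decode reverses it and reports corrupt input as an error.
	restored, err := s2.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, roundtrip ok: %v\n",
		len(src), len(compressed), bytes.Equal(src, restored))
}
```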
- See changes to v1.15.x - -* Jan 21st, 2023 (v1.15.15) - * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 - * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 - * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 - * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 - -* Jan 3rd, 2023 (v1.15.14) - - * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 - * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 - * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 - * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 - -* Dec 11, 2022 (v1.15.13) - * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 - * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 - -* Oct 26, 2022 (v1.15.12) - - * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 - * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 - -* Sept 26, 2022 (v1.15.11) - - * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 - * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 - * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 - * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 - -* Sept 16, 2022 (v1.15.10) - - * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 - * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 - * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 - * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 - * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 - * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 - * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 - -* July 21, 2022 (v1.15.9) - - * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 - * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 - * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 - -* July 13, 2022 (v1.15.8) - - * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 - * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 - * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 - * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 - * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 - * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 - * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 - -* June 29, 2022 (v1.15.7) - - * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 - * zip: Merge upstream https://github.com/klauspost/compress/pull/631 - * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 - * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 - * flate: Faster histograms https://github.com/klauspost/compress/pull/620 - * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 - -* June 3, 2022 (v1.15.6) - * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 - * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 - * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 - * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 - * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 - * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 - * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 - * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 - * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 - * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 - -* May 25, 2022 (v1.15.5) - * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 - * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 - * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 - * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 - * zstd: Copy 
literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 - * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 - * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 - * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 - - -* May 11, 2022 (v1.15.4) - * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) - * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) - * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) - * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) - -* May 5, 2022 (v1.15.3) - * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) - * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) - -* Apr 26, 2022 (v1.15.2) - * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) - * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) - * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) - * Minimum version is Go 1.16, added CI test on 1.18. - -* Mar 11, 2022 (v1.15.1) - * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) - * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) - * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) - * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) - * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) - -* Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) - * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) - -Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. 
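The "synchronous" mode described above is selected through the concurrency options. A minimal sketch, assuming the `github.com/klauspost/compress/zstd` import path and the documented `WithEncoderConcurrency`/`WithDecoderConcurrency` options:

```go
package main

import (
	"bytes"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Concurrency 1 keeps compression on the calling goroutine.
	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("synchronous stream compression")); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}

	// The same option exists on the decode side.
	dec, err := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	out, err := io.ReadAll(dec)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decoded %d bytes", len(out))
}
```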
- -Stream decompression is now faster in asynchronous mode, since the goroutine allocation splits the workload much more effectively. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. - -While the release has been extensively tested, testing is recommended when upgrading. -
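On the pooling point: a decoder that has finished a stream holds no goroutines, so it can be parked in a `sync.Pool` and rebound to a new source with `Reset`. A sketch under those assumptions; the pool wrapper below is illustrative, not part of the library:

```go
package zstdpool

import (
	"io"
	"sync"

	"github.com/klauspost/compress/zstd"
)

// decoders caches idle decoders; each is created without a source and
// must be Reset before use.
var decoders = sync.Pool{
	New: func() any {
		d, _ := zstd.NewReader(nil) // nil reader: bind one via Reset
		return d
	},
}

// Decompress streams the decoded form of r into w using a pooled decoder.
func Decompress(w io.Writer, r io.Reader) error {
	d := decoders.Get().(*zstd.Decoder)
	defer decoders.Put(d)
	if err := d.Reset(r); err != nil {
		return err
	}
	_, err := io.Copy(w, d)
	return err
}
```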
- See changes to v1.14.x - -* Feb 22, 2022 (v1.14.4) - * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) - * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) - * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 - * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) - -* Feb 17, 2022 (v1.14.3) - * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) - * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) - * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) - -* Jan 25, 2022 (v1.14.2) - * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) - * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) - * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) - * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) - * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) - * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) - -* Jan 11, 2022 (v1.14.1) - * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) - * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) - * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) - * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) - * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) -
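Many of the v1.14.x entries above are flate-level speedups, which land transparently because the package mirrors the standard library API. A sketch assuming nothing beyond the import path swap:

```go
package main

import (
	"bytes"
	"log"

	// Drop-in swap: identical API to the standard library's compress/gzip.
	gzip "github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	zw, err := gzip.NewWriterLevel(&buf, gzip.BestSpeed)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write([]byte("same code, faster deflate levels")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed to %d bytes", buf.Len())
}
```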
- -
- See changes to v1.13.x - -* Aug 30, 2021 (v1.13.5) - * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) - * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) - * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) - * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) - -* Aug 12, 2021 (v1.13.4) - * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). - * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) - -* Aug 3, 2021 (v1.13.3) - * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) - * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) - * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) - * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) - * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) - * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) - -* Jun 14, 2021 (v1.13.1) - * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) - * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) - * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) - * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) - -* Jun 3, 2021 (v1.13.0) - * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. - * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) - * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) -
- - -
- See changes to v1.12.x - -* May 25, 2021 (v1.12.3) - * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) - * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) - * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) - -* Apr 27, 2021 (v1.12.2) - * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) - * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) - * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) - * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359) - * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) - * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) - -* Apr 14, 2021 (v1.12.1) - * snappy package removed. Upstream added as dependency. - * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) - * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) - * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) - * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) - * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) - * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) -
- -
- See changes to v1.11.x - -* Mar 26, 2021 (v1.11.13) - * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) - * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) - * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) - * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) - -* Mar 5, 2021 (v1.11.12) - * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). - * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) - -* Mar 1, 2021 (v1.11.9) - * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) - * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) - * s2: Fix binaries. - -* Feb 25, 2021 (v1.11.8) - * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. - * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) - * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) - * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) - * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) - -* Jan 14, 2021 (v1.11.7) - * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) - * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) - * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) - * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) - * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) - -* Jan 7, 2021 (v1.11.6) - * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) - * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) - -* Dec 20, 2020 (v1.11.4) - * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) - * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) - * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) - * Simplify/speed up small blocks with known max size.
[#300](https://github.com/klauspost/compress/pull/300) - * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) - -* Nov 15, 2020 (v1.11.3) - * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) - * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) - -* Oct 11, 2020 (v1.11.2) - * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) - -* Oct 1, 2020 (v1.11.1) - * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) - -* Sept 8, 2020 (v1.11.0) - * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) - * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) - * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) -
- -
- See changes to v1.10.x - -* July 8, 2020 (v1.10.11) - * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) - * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) - -* June 23, 2020 (v1.10.10) - * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) - -* June 16, 2020 (v1.10.9): - * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) - * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) - * Fuzzit tests removed. The service has been purchased and is no longer available. - -* June 5, 2020 (v1.10.8): - * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) - -* June 1, 2020 (v1.10.7): - * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) - * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) - * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) - -* May 21, 2020: (v1.10.6) - * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) - * zstd: Stricter decompression checks. - -* April 12, 2020: (v1.10.5) - * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) - -* Apr 8, 2020: (v1.10.4) - * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) -* Mar 11, 2020: (v1.10.3) - * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) - * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) - * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) - * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) - * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) - -* Feb 27, 2020: (v1.10.2) - * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) - * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) - -* Feb 18, 2020: (v1.10.1) - * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) - * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) - * Remove deflate writer reference when closing. 
[#224](https://github.com/klauspost/compress/pull/224) - -* Feb 4, 2020: (v1.10.0) - * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216) - * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) - * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) - * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) - -
- -
- See changes prior to v1.10.0 - -* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). -* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) -* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. -* Jan 4, 2020: (v1.9.6) Fixed a regression in v1.9.5 that caused corrupt zstd encodes in rare cases. -* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) -* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. -* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) -* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features. -* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197) -* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) -* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit. -* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) -* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) -* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) -* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. -* Nov 28, 2019 (v1.9.3) Fewer allocations in stateless deflate. -* Nov 28, 2019: 5-20% faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) -* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. -* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) -* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. -* Nov 11, 2019: Reduce inflate memory use by 1KB. -* Nov 10, 2019: Fewer allocations in deflate bit writer. -* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. -* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) -* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) -* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) -* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) - -
- -
- See changes prior to v1.9.0 - -* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) -* Oct 3, 2019: Fix inconsistent results on broken zstd streams. -* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). -* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). -* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). -* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). -* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. -* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. -* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. -* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. -* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. -* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. -* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. -* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) -* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) -* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) -* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) -* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. -* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. -* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. -* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. -* June 17, 2019: zstd decompression bugfix. -* June 17, 2019: Fix 32 bit builds. -* June 17, 2019: Easier use in modules (fewer dependencies). -* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. -* June 5, 2019: 20-40% higher throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression, and better compression. -* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. -* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! -* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. -* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added.
-* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). -* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. -* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). -* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. -* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. -* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. -* May 28, 2017: Reduce allocations when resetting decoder. -* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. -* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). -* Oct 25, 2016: Levels 2-4 have been rewritten and now offer significantly better performance than before. -* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. -* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. -* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. -* Mar 24, 2016: Small speedup for level 1-3. -* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. -* Feb 19, 2016: Handle small payloads faster in level 1-3. -* Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. -* Feb 14, 2016: Snappy: Merge upstream changes. -* Feb 14, 2016: Snappy: Fix aggressive skipping. -* Feb 14, 2016: Snappy: Update benchmark. -* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. -* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%. -* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard-to-compress content. -* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. -* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. -* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. -* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. -* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. -* Dec 8 2015: Fixed rare [one byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update! -* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). -* Nov 20 2015: Small optimization to bit writer on 64 bit systems. -* Nov 17 2015: Fixed out-of-bounds errors if the underlying Writer returned an error.
See [#15](https://github.com/klauspost/compress/issues/15). -* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. -* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file -* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. - -
- -# deflate usage - -The packages are drop-in replacements for the standard libraries. Simply replace the import path to use them: - -| old import | new import | Documentation -|--------------------|-----------------------------------------|--------------------| -| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) -| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) -| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) -| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) - -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). - -You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and in the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. - -The packages contain the same functionality as the standard library, so you can use its godoc: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). - -Currently there is only a minor speedup on decompression (mostly CRC32 calculation). - -Memory usage is typically 1MB for a Writer; the stdlib is in the same range. -If you expect to have a lot of concurrently allocated Writers, consider using -the stateless compression described below. - -For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). - -# Stateless compression - -This package offers stateless compression as a special option for gzip/deflate. -It performs compression without maintaining any state between Write calls. - -This means no memory is kept between Write calls, but compression and speed will be suboptimal. - -This is only relevant in cases where you expect to run many thousands of compressors concurrently, -but with very little activity. This is *not* intended for regular web servers serving individual requests. - -Because of this, the size of actual Write calls will affect output size. - -In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. - -For direct deflate use, NewStatelessWriter and StatelessDeflate are available; see the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) and the sketch after this section. - -A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: - -``` - // replace 'ioutil.Discard' with your output. - gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) - if err != nil { - return err - } - defer gzw.Close() - - w := bufio.NewWriterSize(gzw, 4096) - defer w.Flush() - - // Write to 'w' -``` - -This will only use up to 4KB in memory when the writer is idle.
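For direct deflate use without the gzip framing, a minimal sketch using `flate.NewStatelessWriter`, assuming the same 4KB buffering strategy as the gzip example above:

```
  // replace 'ioutil.Discard' with your output.
  fw := flate.NewStatelessWriter(ioutil.Discard)
  defer fw.Close()

  w := bufio.NewWriterSize(fw, 4096)
  defer w.Flush()

  // Write to 'w'
```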
- -Compression is almost always worse than the fastest compression level -and each write will allocate (a little) memory. - -# Performance Update 2018 - -It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. - -The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. - -The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. - -The `gzstd` (standard library gzip) and `gzkp` (this package gzip) each use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). - - -## Overall differences. - -There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. - -The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library. - -This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off, and "9" means "give me the best compression"; the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression. - -There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. - -## Web Content - -This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. - -Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in-between the differences are quite big. - -Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. - -## Object files - -This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
- -The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression. - -The standard library seems suboptimal on levels 3 and 4, offering both worse compression and speed than levels 6 and 7 of this package respectively. - -## Highly Compressible File - -This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. - -It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression. - -So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". - -## Medium-High Compressible - -This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams. - -We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. - -## Medium Compressible - -I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. - -The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. - - -## Un-compressible Content - -This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. - - -## Huffman only compression - -This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character. - -This means that often used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). - -Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. - -The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). - -The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content; a minimal sketch of enabling it follows below.
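A minimal sketch of enabling it for gzip with this package; apart from the `gzip.HuffmanOnly` level constant, the setup is the same as for any other level:

```
  // replace 'ioutil.Discard' with your output.
  gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.HuffmanOnly)
  if err != nil {
    return err
  }
  defer gzw.Close()

  // Write to 'gzw'
```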
For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup. - -For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). - -This is implemented in Go 1.7 as "Huffman Only" mode, though not exposed for gzip. - -# Other packages - -Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): - -* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. -* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. -* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. -* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. -* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. -* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. -* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. - -# license - -This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md deleted file mode 100644 index ca6685e2b7..0000000000 --- a/vendor/github.com/klauspost/compress/SECURITY.md +++ /dev/null @@ -1,25 +0,0 @@ -# Security Policy - -## Supported Versions - -Security updates are applied only to the latest release. - -## Vulnerability Definition - -A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. - -Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. - -Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory, with appropriate options applied, to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. - -It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. - -Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround. - -## Reporting a Vulnerability - -If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. - -Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. - -This project is maintained by a team of volunteers on a reasonable-effort basis.
As such, vulnerabilities will be disclosed on a best-effort basis. diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go deleted file mode 100644 index ea5a692d51..0000000000 --- a/vendor/github.com/klauspost/compress/compressible.go +++ /dev/null @@ -1,85 +0,0 @@ -package compress - -import "math" - -// Estimate returns a normalized compressibility estimate of block b. -// Values close to zero are likely uncompressible. -// Values above 0.1 are likely to be compressible. -// Values above 0.5 are very compressible. -// Very small lengths will return 0. -func Estimate(b []byte) float64 { - if len(b) < 16 { - return 0 - } - - // Correctly predicted order 1 - hits := 0 - lastMatch := false - var o1 [256]byte - var hist [256]int - c1 := byte(0) - for _, c := range b { - if c == o1[c1] { - // We only count a hit if there were two correct predictions in a row. - if lastMatch { - hits++ - } - lastMatch = true - } else { - lastMatch = false - } - o1[c1] = c - c1 = c - hist[c]++ - } - - // Use x^0.6 to give better spread - prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) - - // Calculate histogram distribution - variance := float64(0) - avg := float64(len(b)) / 256 - - for _, v := range hist { - Δ := float64(v) - avg - variance += Δ * Δ - } - - stddev := math.Sqrt(float64(variance)) / float64(len(b)) - exp := math.Sqrt(1 / float64(len(b))) - - // Subtract expected stddev - stddev -= exp - if stddev < 0 { - stddev = 0 - } - stddev *= 1 + exp - - // Use x^0.4 to give better spread - entropy := math.Pow(stddev, 0.4) - - // 50/50 weight between prediction and histogram distribution - return math.Pow((prediction+entropy)/2, 0.9) -} - -// ShannonEntropyBits returns the minimum number of bits required to represent -// an entropy encoding of the input bytes. -// https://en.wiktionary.org/wiki/Shannon_entropy -func ShannonEntropyBits(b []byte) int { - if len(b) == 0 { - return 0 - } - var hist [256]int - for _, c := range b { - hist[c]++ - } - shannon := float64(0) - invTotal := 1.0 / float64(len(b)) - for _, v := range hist[:] { - if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - } - } - return int(math.Ceil(shannon)) -} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md deleted file mode 100644 index ea7324da67..0000000000 --- a/vendor/github.com/klauspost/compress/fse/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# Finite State Entropy - -This package provides Finite State Entropy encoding and decoding. - -Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) -encoding provides a fast near-optimal symbol encoding/decoding -for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) - -## News - - * Feb 2018: First implementation released. Consider this beta software for now. - -# Usage - -This package provides a low-level interface for compressing single, independent blocks.
- -Each block is separate, and there are no built-in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `<nil>` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `(error)` | An internal error occurred. | - -As can be seen above, some errors are returned even under normal operation, so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object -that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same -object can be used for both. - -Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples), or the short roundtrip sketch at the end of this README. - -# Performance - -Many factors affect speed. Block sizes and compressibility of the material are primary factors. -All compression functions currently run only on the calling goroutine, so only one core will be used per block. - -The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input -is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be -beneficial to transpose all your input values down by 64. - -With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and -around 300MB/s for decompression. - -The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. - -# Plans - -At one point, more internals will be exposed to facilitate more "expert" usage of the components. - -A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR.
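To make the usage description above concrete, here is a sketch of a single-block roundtrip that re-uses one `Scratch` for both directions; `input` is assumed to be a previously filled `[]byte`:

```
  var s fse.Scratch
  comp, err := fse.Compress(input, &s)
  switch err {
  case nil:
    // ok, 'comp' now holds the compressed block
  case fse.ErrIncompressible, fse.ErrUseRLE:
    // normal outcomes: store the block uncompressed or as RLE instead
    return nil
  default:
    return err // internal error
  }

  // 'comp' aliases s.Out, so detach the buffer before re-using the scratch.
  s.Out = nil

  dec, err := fse.Decompress(comp, &s)
  if err != nil {
    return err // input was likely corrupted
  }
  // 'dec' should now equal 'input'; note there is no built-in integrity check.
```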
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go deleted file mode 100644 index f65eb3909c..0000000000 --- a/vendor/github.com/klauspost/compress/fse/bitreader.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "encoding/binary" - "errors" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) uint16 { - if n == 0 || b.bitsRead >= 64 { - return 0 - } - return b.getBitsFast(n) -} - -// getBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.bitsRead >= 64 && b.off == 0 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. 
- b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go deleted file mode 100644 index e82fa3bb7b..0000000000 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go deleted file mode 100644 index abade2d605..0000000000 --- a/vendor/github.com/klauspost/compress/fse/bytereader.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// Uint32 returns a little endian uint32 starting at current offset. 
-func (b byteReader) Uint32() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go deleted file mode 100644 index 65d777357a..0000000000 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "errors" - "fmt" -) - -// Compress the input bytes. Input must be < 2GB. -// Provide a Scratch buffer to avoid memory allocations. -// Note that the output is also kept in the scratch buffer. -// If input is too hard to compress, ErrIncompressible is returned. -// If input is a single byte value repeated ErrUseRLE is returned. -func Compress(in []byte, s *Scratch) ([]byte, error) { - if len(in) <= 1 { - return nil, ErrIncompressible - } - if len(in) > (2<<30)-1 { - return nil, errors.New("input too big, must be < 2GB") - } - s, err := s.prepare(in) - if err != nil { - return nil, err - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - if maxCount == 0 { - maxCount = s.countSimple(in) - } - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount == len(in) { - // One symbol, use RLE - return nil, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, ErrIncompressible - } - s.optimalTableLog() - err = s.normalizeCount() - if err != nil { - return nil, err - } - err = s.writeCount() - if err != nil { - return nil, err - } - - if false { - err = s.validateNorm() - if err != nil { - return nil, err - } - } - - err = s.buildCTable() - if err != nil { - return nil, err - } - err = s.compress(in) - if err != nil { - return nil, err - } - s.Out = s.bw.out - // Check if we compressed. - if len(s.Out) >= len(in) { - return nil, ErrIncompressible - } - return s.Out, nil -} - -// cState contains the compression state of a stream. -type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + first.deltaFindState - c.state = c.stateTable[lu] -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// encode the output symbol provided and write it to the bitstream. 
-func (c *cState) encodeZero(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) - c.bw.flush() -} - -// compress is the main compression loop that will encode the input from the last byte to the first. -func (s *Scratch) compress(src []byte) error { - if len(src) <= 2 { - return errors.New("compress: src too small") - } - tt := s.ct.symbolTT[:256] - s.bw.reset(s.Out) - - // Our two states each encodes every second byte. - // Last byte encoded (first byte decoded) will always be encoded by c1. - var c1, c2 cState - - // Encode so remaining size is divisible by 4. - ip := len(src) - if ip&1 == 1 { - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - c1.encodeZero(tt[src[ip-3]]) - ip -= 3 - } else { - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - ip -= 2 - } - if ip&2 != 0 { - c2.encodeZero(tt[src[ip-1]]) - c1.encodeZero(tt[src[ip-2]]) - ip -= 2 - } - src = src[:ip] - - // Main compression loop. - switch { - case !s.zeroBits && s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush. - // We do not need to check if any output is 0 bits. - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - c2.encode(tt[v2]) - c1.encode(tt[v3]) - } - case !s.zeroBits: - // We do not need to check if any output is 0 bits. - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - s.bw.flush32() - c2.encode(tt[v2]) - c1.encode(tt[v3]) - } - case s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - } - default: - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - s.bw.flush32() - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - } - } - - // Flush final state. - // Used to initialize state when decoding. - c2.flush(s.actualTableLog) - c1.flush(s.actualTableLog) - - s.bw.close() - return nil -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. 
-func (s *Scratch) writeCount() error { - var ( - tableLog = s.actualTableLog - tableSize = 1 << tableLog - previous0 bool - charnum uint16 - - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 - - // Write Table Size - bitStream = uint32(tableLog - minTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - ) - if cap(s.Out) < maxHeaderSize { - s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) - } - outP := uint(0) - out := s.Out[:maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return errors.New("internal error: remaining<1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += (bitCount + 7) / 8 - - if charnum > s.symbolLen { - return errors.New("internal error: charnum > s.symbolLen") - } - s.Out = out[:outP] - return nil -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaFindState int32 - deltaNbBits uint32 -} - -// String prints values as a human readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *Scratch) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. 
-func (s *Scratch) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [maxSymbolValue + 2]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. - largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = int32(total - 1) - total++ - default: - maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = int32(total - v) - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int) { - for _, v := range in { - s.count[v]++ - } - m, symlen := uint32(0), s.symbolLen - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - symlen = uint16(i) + 1 - } - s.symbolLen = symlen - return int(m) -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
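// Illustrative sketch of the "spread symbols" step in buildCTable above (and
// in buildDtable further below): each symbol is written at strided positions
// so its slots end up roughly evenly distributed across the state table. The
// low-probability/highThreshold handling of the vendored code is omitted.
package main

import "fmt"

func tableStep(tableSize uint32) uint32 {
	return (tableSize >> 1) + (tableSize >> 3) + 3
}

func main() {
	const tableLog = 4
	tableSize := uint32(1) << tableLog
	norm := []int16{9, 4, 2, 1} // normalized counts, summing to tableSize
	table := make([]byte, tableSize)
	step := tableStep(tableSize)
	mask := tableSize - 1
	pos := uint32(0)
	for sym, n := range norm {
		for i := int16(0); i < n; i++ {
			table[pos] = byte(sym)
			pos = (pos + step) & mask
		}
	}
	// The step is co-prime with the table size, so after tableSize writes the
	// cursor must land back on 0 - the same invariant the real code checks.
	fmt.Println(table, pos == 0)
}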
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 - minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > maxTableLog { - tableLog = maxTableLog - } - s.actualTableLog = tableLog -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -func (s *Scratch) normalizeCount() error { - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(s.br.remain()) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(s.br.remain() >> tableLog) - ) - - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - return s.normalizeCount2() - } - s.norm[largest] += stillToDistribute - return nil -} - -// Secondary normalization method. -// To be used when primary method fails. 
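// Simplified sketch of what normalizeCount above computes: scale raw symbol
// counts so they sum to exactly 1<<tableLog while every present symbol keeps
// a probability of at least 1. The vendored code uses fixed-point math plus
// the rtbTable correction; this naive analogue just settles the remainder on
// the most probable symbol, and assumes at least one nonzero count.
package main

import "fmt"

func normalizeNaive(count []uint32, total int, tableLog uint8) []int16 {
	tableSize := int16(1) << tableLog
	norm := make([]int16, len(count))
	remaining := tableSize
	largest := 0
	for i, c := range count {
		if c == 0 {
			continue
		}
		p := int16(uint64(c) * uint64(tableSize) / uint64(total))
		if p == 0 {
			p = 1 // low-probability symbols still need one table slot
		}
		norm[i] = p
		remaining -= p
		if c > count[largest] {
			largest = i
		}
	}
	// Hand the rounding surplus or deficit to the most probable symbol.
	norm[largest] += remaining
	return norm
}

func main() {
	fmt.Println(normalizeNaive([]uint32{700, 200, 60, 40}, 1000, 6)) // sums to 64
}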
-func (s *Scratch) normalizeCount2() error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(s.br.remain()) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// validateNorm validates the normalized histogram table. 
-func (s *Scratch) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 { - if previous0 { - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - n0 += 24 - if b.off < iend-5 { - b.advance(2) - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 16 - bitCount += 16 - } - } - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - for charnum < n0 { - s.norm[charnum&0xff] = 0 - charnum++ - } - - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*(threshold) - 1) - (remaining) - var count int32 - - if (int32(bitStream) & (threshold - 1)) < max { - count = int32(bitStream) & (threshold - 1) - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - count-- // extra accuracy - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - } - bitStream = b.Uint32() >> (bitCount & 31) - } - s.symbolLen = charnum - - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<> 3) - return nil -} - -// decSymbol contains information about a state entry, -// Including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -type decSymbol struct { - newState uint16 - symbol uint8 - nbBits uint8 -} - -// allocDtable will allocate decoding tables if they are not big enough. 
-func (s *Scratch) allocDtable() { - tableSize := 1 << s.actualTableLog - if cap(s.decTable) < tableSize { - s.decTable = make([]decSymbol, tableSize) - } - s.decTable = s.decTable[:tableSize] - - if cap(s.ct.tableSymbol) < 256 { - s.ct.tableSymbol = make([]byte, 256) - } - s.ct.tableSymbol = s.ct.tableSymbol[:256] - - if cap(s.ct.stateTable) < 256 { - s.ct.stateTable = make([]uint16, 256) - } - s.ct.stateTable = s.ct.stateTable[:256] -} - -// buildDtable will build the decoding table. -func (s *Scratch) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - s.allocDtable() - symbolNext := s.ct.stateTable[:256] - - // Init, lay down lowprob symbols - s.zeroBits = false - { - largeLimit := int16(1 << (s.actualTableLog - 1)) - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.decTable[highThreshold].symbol = uint8(i) - highThreshold-- - symbolNext[i] = 1 - } else { - if v >= largeLimit { - s.zeroBits = true - } - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.decTable[position].symbol = uint8(ss) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.decTable { - symbol := v.symbol - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.decTable[u].nbBits = nBits - newState := (nextState << nBits) - tableSize - if newState >= tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.decTable[u].newState = newState - } - } - return nil -} - -// decompress will decompress the bitstream. -// If the buffer is over-read an error is returned. -func (s *Scratch) decompress() error { - br := &s.bits - if err := br.init(s.br.unread()); err != nil { - return err - } - - var s1, s2 decoder - // Initialize and decode first state and symbol. - s1.init(br, s.decTable, s.actualTableLog) - s2.init(br, s.decTable, s.actualTableLog) - - // Use temp table to avoid bound checks/append penalty. - var tmp = s.ct.tableSymbol[:256] - var off uint8 - - // Main part - if !s.zeroBits { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.nextFast() - tmp[off+1] = s2.nextFast() - br.fillFast() - tmp[off+2] = s1.nextFast() - tmp[off+3] = s2.nextFast() - off += 4 - // When off is 0, we have overflowed and should write. - if off == 0 { - s.Out = append(s.Out, tmp...) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } else { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.next() - tmp[off+1] = s2.next() - br.fillFast() - tmp[off+2] = s1.next() - tmp[off+3] = s2.next() - off += 4 - if off == 0 { - s.Out = append(s.Out, tmp...) - // When off is 0, we have overflowed and should write. 
- if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } - s.Out = append(s.Out, tmp[:off]...) - - // Final bits, a bit more expensive check - for { - if s1.finished() { - s.Out = append(s.Out, s1.final(), s2.final()) - break - } - br.fill() - s.Out = append(s.Out, s1.next()) - if s2.finished() { - s.Out = append(s.Out, s2.final(), s1.final()) - break - } - s.Out = append(s.Out, s2.next()) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - return br.close() -} - -// decoder keeps track of the current state and updates it from the bitstream. -type decoder struct { - state uint16 - br *bitReader - dt []decSymbol -} - -// init will initialize the decoder and read the first state from the stream. -func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { - d.dt = dt - d.br = in - d.state = in.getBits(tableLog) -} - -// next returns the next symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) next() uint8 { - n := &d.dt[d.state] - lowBits := d.br.getBits(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (d *decoder) finished() bool { - return d.br.finished() && d.dt[d.state].nbBits > 0 -} - -// final returns the current state symbol without decoding the next. -func (d *decoder) final() uint8 { - return d.dt[d.state].symbol -} - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) nextFast() uint8 { - n := d.dt[d.state] - lowBits := d.br.getBitsFast(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go deleted file mode 100644 index 535cbadfde..0000000000 --- a/vendor/github.com/klauspost/compress/fse/fse.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -// Package fse provides Finite State Entropy encoding and decoding. -// -// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding -// for byte blocks as implemented in zstd. -// -// See https://github.com/klauspost/compress/tree/master/fse for more information. -package fse - -import ( - "errors" - "fmt" - "math/bits" -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = 14 - defaultMemoryUsage = 13 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - defaultTablelog = defaultMemoryUsage - 2 - minTablelog = 5 - maxSymbolValue = 255 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. 
- ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") -) - -// Scratch provides temporary storage for compression and decompression. -type Scratch struct { - // Private - count [maxSymbolValue + 1]uint32 - norm [maxSymbolValue + 1]int16 - br byteReader - bits bitReader - bw bitWriter - ct cTable // Compression tables. - decTable []decSymbol // Decompression table. - maxCount int // count of the most probable symbol - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // DecompressLimit limits the maximum decoded size acceptable. - // If > 0 decompression will stop when approximately this many bytes - // has been decoded. - // If 0, maximum size will be 2GB. - DecompressLimit int - - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - TableLog uint8 -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *Scratch) Histogram() []uint32 { - return s.count[:] -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = 255 - } - if s.TableLog == 0 { - s.TableLog = defaultTablelog - } - if s.TableLog > maxTableLog { - return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - s.br.init(in) - if s.DecompressLimit == 0 { - // Max size 2GB. - s.DecompressLimit = (2 << 30) - 1 - } - - return s, nil -} - -// tableStep returns the next table index. 
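// Sketch of feeding a caller-built histogram through the Histogram /
// HistogramFinished hooks above, so Compress can skip its own counting pass.
// Illustrative only; it assumes nothing beyond the API in this file.
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/fse"
)

func compressWithHist(in []byte, s *fse.Scratch) ([]byte, error) {
	hist := s.Histogram() // always length 256
	for i := range hist {
		hist[i] = 0 // clear in case the scratch was used before
	}
	var maxSym uint8
	var maxCount uint32
	for _, b := range in {
		hist[b]++
		if hist[b] > maxCount {
			maxCount = hist[b]
		}
		if b > maxSym {
			maxSym = b
		}
	}
	s.HistogramFinished(maxSym, int(maxCount))
	return fse.Compress(in, s)
}

func main() {
	var s fse.Scratch
	out, err := compressWithHist(bytes.Repeat([]byte("aab"), 400), &s)
	fmt.Println(len(out), err)
}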
-func tableStep(tableSize uint32) uint32 {
-	return (tableSize >> 1) + (tableSize >> 3) + 3
-}
-
-func highBits(val uint32) (n uint32) {
-	return uint32(bits.Len32(val) - 1)
-}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
deleted file mode 100644
index aff942205f..0000000000
--- a/vendor/github.com/klauspost/compress/gen.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-
-cd s2/cmd/_s2sx/ || exit 1
-go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
deleted file mode 100644
index b3d262958f..0000000000
--- a/vendor/github.com/klauspost/compress/huff0/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
deleted file mode 100644
index 8b6e5c6638..0000000000
--- a/vendor/github.com/klauspost/compress/huff0/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# Huff0 entropy compression
-
-This package provides Huff0 encoding and decoding as used in zstd.
-
-[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders),
-a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU
-(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds.
-
-This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
-This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders,
-but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding.
-
-* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
-
-## News
-
-This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
-
-This ensures that most functionality is well tested.
-
-# Usage
-
-This package provides a low level interface that allows to compress single independent blocks.
-
-Each block is separate, and there is no built in integrity checks.
-This means that the caller should keep track of block sizes and also do checksums if needed.
-
-Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
-[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
-You must provide input and will receive the output and maybe an error.
-
-These error values can be returned:
-
-| Error               | Description                                                                  |
-|---------------------|-----------------------------------------------------------------------------|
-| `<nil>`             | Everything ok, output is returned                                           |
-| `ErrIncompressible` | Returned when input is judged to be too hard to compress                    |
-| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated |
-| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 Kib)      |
-| `(error)`           | An internal error occurred.                                                 |
-
-
-As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these.
-
-To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
-that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same
-object can be used for both.
-
-Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this
-you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
-
-The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding.
-
-## Tables and re-use
-
-Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
-
-The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
-that controls this behaviour. See the documentation for details. This can be altered between each block.
-
-Do however note that this information is *not* stored in the output block and it is up to the users of the package to
-record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
-based on the boolean reported back from the CompressXX call.
-
-If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the
-[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
-
-## Decompressing
-
-The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
-This will initialize the decoding tables.
-You can supply the complete block to `ReadTable` and it will return the data part of the block
-which can be given to the decompressor.
-
-Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
-or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
-
-For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
-
-You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
-your input was likely corrupted.
-
-It is important to note that a successful decoding does *not* mean your output matches your original input.
-There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
-
-# Contributing
-
-Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
-changes will likely not be accepted. If in doubt open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
deleted file mode 100644
index e36d9742f9..0000000000
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2018 Klaus Post. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
-
-package huff0
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-)
-
-// bitReader reads a bitstream in reverse.
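// A minimal sketch of the block flow the README above describes, using the
// exported huff0 API (Compress1X, ReadTable, Scratch.Decompress1X). The
// ReadTable and Decompress1X signatures are the package's exported ones,
// assumed here since they are not shown in this hunk.
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := bytes.Repeat([]byte("entropy coding "), 200) // well under the block limit

	var c huff0.Scratch
	comp, reused, err := huff0.Compress1X(in, &c)
	if err != nil {
		// ErrIncompressible / ErrUseRLE are normal outcomes; store raw then.
		fmt.Println("stored raw:", err)
		return
	}
	_ = reused // whether the previous table was reused is reported, not stored

	// Decoding: ReadTable consumes the table header and returns the data part.
	var d huff0.Scratch
	s, data, err := huff0.ReadTable(comp, &d)
	if err != nil {
		panic(err)
	}
	out, err := s.Decompress1X(data)
	if err != nil || !bytes.Equal(out, in) {
		panic("round trip failed")
	}
	fmt.Printf("%d -> %d bytes\n", len(in), len(comp))
}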
-// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderBytes struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReaderBytes) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderBytes) peekByteFast() uint8 { - got := uint8(b.value >> 56) - return got -} - -func (b *bitReaderBytes) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderBytes) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. -func (b *bitReaderBytes) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderBytes) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderBytes) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -func (b *bitReaderBytes) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderBytes) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -// bitReaderShifted reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderShifted struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. 
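// Sketch of the stream-start convention used by the reverse bit readers
// above: the writer's close() appends a final 1 bit, so init() finds the
// highest set bit of the last byte and skips past it. This mirrors the
// advance(8 - highBit32(v)) call in the vendored code.
package main

import (
	"fmt"
	"math/bits"
)

func startBits(lastByte byte) (skip uint8, ok bool) {
	if lastByte == 0 {
		return 0, false // corrupt: no end-of-stream marker
	}
	// bits.Len8 is 1 + index of the highest set bit, so this skips the
	// padding zeros plus the marker bit itself.
	return 9 - uint8(bits.Len8(lastByte)), true
}

func main() {
	for _, b := range []byte{0x01, 0x80, 0x2A} {
		n, ok := startBits(b)
		fmt.Printf("last byte %08b: skip %d bits (ok=%v)\n", b, n, ok)
	}
}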
-func (b *bitReaderShifted) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { - return uint16(b.value >> ((64 - n) & 63)) -} - -func (b *bitReaderShifted) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderShifted) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. -func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderShifted) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) - b.bitsRead -= 8 - b.off-- - } -} - -func (b *bitReaderShifted) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderShifted) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go deleted file mode 100644 index 0ebc9aaac7..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// encSymbol will add up to 16 bits. 
value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encSymbol(ct cTable, symbol byte) { - enc := ct[symbol] - b.bitContainer |= uint64(enc.val) << (b.nBits & 63) - if false { - if enc.nBits == 0 { - panic("nbits 0") - } - } - b.nBits += enc.nBits -} - -// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { - encA := ct[av] - encB := ct[bv] - sh := b.nBits & 63 - combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) - b.bitContainer |= combined << sh - if false { - if encA.nBits == 0 { - panic("nbitsA 0") - } - if encB.nBits == 0 { - panic("nbitsB 0") - } - } - b.nBits += encA.nBits + encB.nBits -} - -// encFourSymbols adds up to 32 bits from four symbols. -// It will not check if there is space for them, -// so the caller must ensure that b has been flushed recently. -func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { - bitsA := encA.nBits - bitsB := bitsA + encB.nBits - bitsC := bitsB + encC.nBits - bitsD := bitsC + encD.nBits - combined := uint64(encA.val) | - (uint64(encB.val) << (bitsA & 63)) | - (uint64(encC.val) << (bitsB & 63)) | - (uint64(encD.val) << (bitsC & 63)) - b.bitContainer |= combined << (b.nBits & 63) - b.nBits += bitsD -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go deleted file mode 100644 index 4dcab8d232..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// Int32 returns a little endian int32 starting at current offset. 
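// Sketch of the LSB-first packing used by the bitWriter above: new bits
// always enter above the ones already queued, and whole bytes leave from
// the bottom of the container. Trimmed-down illustration of the same idea.
package main

import "fmt"

type bw struct {
	container uint64
	n         uint8
	out       []byte
}

func (b *bw) add(value uint16, bits uint8) {
	b.container |= uint64(value) << (b.n & 63)
	b.n += bits
}

func (b *bw) flush32() {
	if b.n < 32 {
		return
	}
	b.out = append(b.out,
		byte(b.container), byte(b.container>>8),
		byte(b.container>>16), byte(b.container>>24))
	b.container >>= 32
	b.n -= 32
}

func main() {
	var b bw
	b.add(0b101, 3)
	b.add(0b11, 2)
	fmt.Printf("%05b\n", b.container) // 11101: second value sits above the first
}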
-func (b byteReader) Int32() int32 { - v3 := int32(b.b[b.off+3]) - v2 := int32(b.b[b.off+2]) - v1 := int32(b.b[b.off+1]) - v0 := int32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - v3 := uint32(b.b[b.off+3]) - v2 := uint32(b.b[b.off+2]) - v1 := uint32(b.b[b.off+1]) - v0 := uint32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go deleted file mode 100644 index 518436cf3d..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ /dev/null @@ -1,741 +0,0 @@ -package huff0 - -import ( - "fmt" - "math" - "runtime" - "sync" -) - -// Compress1X will compress the input. -// The output can be decoded using Decompress1X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - return compress(in, s, s.compress1X) -} - -// Compress4X will compress the input. The input is split into 4 independent blocks -// and compressed similar to Compress1X. -// The output can be decoded using Decompress4X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - if false { - // TODO: compress4Xp only slightly faster. - const parallelThreshold = 8 << 10 - if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { - return compress(in, s, s.compress4X) - } - return compress(in, s, s.compress4Xp) - } - return compress(in, s, s.compress4X) -} - -func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { - // Nuke previous table if we cannot reuse anyway. - if s.Reuse == ReusePolicyNone { - s.prevTable = s.prevTable[:0] - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return nil, false, ErrIncompressible - } - // One symbol, use RLE - return nil, false, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, false, ErrIncompressible - } - if s.Reuse == ReusePolicyMust && !canReuse { - // We must reuse, but we can't. 
- return nil, false, ErrIncompressible - } - if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { - keepTable := s.cTable - keepTL := s.actualTableLog - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - s.cTable = keepTable - s.actualTableLog = keepTL - if err == nil && len(s.Out) < wantSize { - s.OutData = s.Out - return s.Out, true, nil - } - if s.Reuse == ReusePolicyMust { - return nil, false, ErrIncompressible - } - // Do not attempt to re-use later. - s.prevTable = s.prevTable[:0] - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return nil, false, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - if s.Reuse == ReusePolicyAllow && canReuse { - hSize := len(s.Out) - oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) - newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) - if oldSize <= hSize+newSize || hSize+12 >= wantSize { - // Retain cTable even if we re-use. - keepTable := s.cTable - keepTL := s.actualTableLog - - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - - // Restore ctable. - s.cTable = keepTable - s.actualTableLog = keepTL - if err != nil { - return nil, false, err - } - if len(s.Out) >= wantSize { - return nil, false, ErrIncompressible - } - s.OutData = s.Out - return s.Out, true, nil - } - } - - // Use new table - err = s.cTable.write(s) - if err != nil { - s.OutTable = nil - return nil, false, err - } - s.OutTable = s.Out - - // Compress using new table - s.Out, err = compressor(in) - if err != nil { - s.OutTable = nil - return nil, false, err - } - if len(s.Out) >= wantSize { - s.OutTable = nil - return nil, false, ErrIncompressible - } - // Move current table into previous. - s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] - s.OutData = s.Out[len(s.OutTable):] - return s.Out, false, nil -} - -// EstimateSizes will estimate the data sizes -func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { - s, err = s.prepare(in) - if err != nil { - return 0, 0, 0, err - } - - // Create histogram, if none was provided. - tableSz, dataSz, reuseSz = -1, -1, -1 - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return 0, 0, 0, ErrIncompressible - } - // One symbol, use RLE - return 0, 0, 0, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return 0, 0, 0, ErrIncompressible - } - - // Calculate new table. 
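// Sketch of cross-block table re-use, following the Reuse branches above.
// Assumes only the exported ReusePolicy constants referenced in this file;
// note the reUsed flag is not stored in the stream, so the caller has to
// record it for the decoder.
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	blocks := [][]byte{
		bytes.Repeat([]byte("hello huff0 "), 100),
		bytes.Repeat([]byte("hello huff0!"), 100), // similar histogram
	}
	var s huff0.Scratch
	s.Reuse = huff0.ReusePolicyAllow // re-use the prior table when it estimates smaller
	for i, b := range blocks {
		out, reused, err := huff0.Compress1X(b, &s)
		if err != nil {
			fmt.Printf("block %d stored raw: %v\n", i, err)
			continue
		}
		fmt.Printf("block %d: %d -> %d bytes, reused table: %v\n",
			i, len(b), len(out), reused)
	}
}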
- err = s.buildCTable() - if err != nil { - return 0, 0, 0, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - tableSz, err = s.cTable.estTableSize(s) - if err != nil { - return 0, 0, 0, err - } - if canReuse { - reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) - } - dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) - - // Restore - return tableSz, dataSz, reuseSz, nil -} - -func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src), nil -} - -func (s *Scratch) compress1xDo(dst, src []byte) []byte { - var bw = bitWriter{out: dst} - - // N is length divisible by 4. - n := len(src) - n -= n & 3 - cTable := s.cTable[:256] - - // Encode last bytes. - for i := len(src) & 3; i > 0; i-- { - bw.encSymbol(cTable, src[n+i-1]) - } - n -= 4 - if s.actualTableLog <= 8 { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) - } - } else { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.flush32() - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } - bw.close() - return bw.out -} - -var sixZeros [6]byte - -func (s *Scratch) compress4X(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - segmentSize := (len(src) + 3) / 4 - - // Add placeholder for output length - offsetIdx := len(s.Out) - s.Out = append(s.Out, sixZeros[:]...) - - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - idx := len(s.Out) - s.Out = s.compress1xDo(s.Out, toDo) - if len(s.Out)-idx > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - length := len(s.Out) - idx - s.Out[i*2+offsetIdx] = byte(length) - s.Out[i*2+offsetIdx+1] = byte(length >> 8) - } - } - - return s.Out, nil -} - -// compress4Xp will compress 4 streams using separate goroutines. -func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - // Add placeholder for output length - s.Out = s.Out[:6] - - segmentSize := (len(src) + 3) / 4 - var wg sync.WaitGroup - wg.Add(4) - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - // Separate goroutine for each block. - go func(i int) { - s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) - wg.Done() - }(i) - } - wg.Wait() - for i := 0; i < 4; i++ { - o := s.tmpOut[i] - if len(o) > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - s.Out[i*2] = byte(len(o)) - s.Out[i*2+1] = byte(len(o) >> 8) - } - - // Write output. - s.Out = append(s.Out, o...) - } - return s.Out, nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. 
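// Sketch of the 4X framing written by compress4X above: a 6-byte jump table
// holding the little-endian byte lengths of the first three streams; the
// fourth stream runs to the end of the block. splitStreams is a hypothetical
// helper, not part of the vendored package.
package main

import (
	"encoding/binary"
	"fmt"
)

func splitStreams(block []byte) ([4][]byte, error) {
	var streams [4][]byte
	if len(block) < 6 {
		return streams, fmt.Errorf("block too short")
	}
	sizes := block[:6]
	rest := block[6:]
	for i := 0; i < 3; i++ {
		n := int(binary.LittleEndian.Uint16(sizes[i*2:]))
		if n > len(rest) {
			return streams, fmt.Errorf("corrupt jump table")
		}
		streams[i], rest = rest[:n], rest[n:]
	}
	streams[3] = rest
	return streams, nil
}

func main() {
	// Hypothetical block: three 2-byte streams and a 3-byte tail.
	block := []byte{2, 0, 2, 0, 2, 0, 'a', 'a', 'b', 'b', 'c', 'c', 'd', 'd', 'd'}
	s, err := splitStreams(block)
	fmt.Println(s, err)
}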
-func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { - reuse = true - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - if len(s.prevTable) > 0 { - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else if s.prevTable[i].nBits == 0 { - reuse = false - } - } - return int(m), reuse - } - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - s.symbolLen = uint16(i) + 1 - } - return int(m), false -} - -func (s *Scratch) canUseTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 && c[i].nBits == 0 { - return false - } - } - return true -} - -//lint:ignore U1000 used for debugging -func (s *Scratch) validateTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 { - if c[i].nBits == 0 { - return false - } - if c[i].nBits > s.actualTableLog { - return false - } - } - } - return true -} - -// minTableLog provides the minimum logSize to safely represent a distribution. -func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.br.remain())) + 1 - minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > tableLogMax { - tableLog = tableLogMax - } - s.actualTableLog = tableLog -} - -type cTableEntry struct { - val uint16 - nBits uint8 - // We have 8 bits extra -} - -const huffNodesMask = huffNodesLen - 1 - -func (s *Scratch) buildCTable() error { - s.optimalTableLog() - s.huffSort() - if cap(s.cTable) < maxSymbolValue+1 { - s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) - } else { - s.cTable = s.cTable[:s.symbolLen] - for i := range s.cTable { - s.cTable[i] = cTableEntry{} - } - } - - var startNode = int16(s.symbolLen) - nonNullRank := s.symbolLen - 1 - - nodeNb := startNode - huffNode := s.nodes[1 : huffNodesLen+1] - - // This overlays the slice above, but allows "-1" index lookups. - // Different from reference implementation. 
- huffNode0 := s.nodes[0 : huffNodesLen+1] - - for huffNode[nonNullRank].count() == 0 { - nonNullRank-- - } - - lowS := int16(nonNullRank) - nodeRoot := nodeNb + lowS - 1 - lowN := nodeNb - huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) - huffNode[lowS].setParent(nodeNb) - huffNode[lowS-1].setParent(nodeNb) - nodeNb++ - lowS -= 2 - for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].setCount(1 << 30) - } - // fake entry, strong barrier - huffNode0[0].setCount(1 << 31) - - // create parents - for nodeNb <= nodeRoot { - var n1, n2 int16 - if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { - n1 = lowS - lowS-- - } else { - n1 = lowN - lowN++ - } - if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { - n2 = lowS - lowS-- - } else { - n2 = lowN - lowN++ - } - - huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) - huffNode0[n1+1].setParent(nodeNb) - huffNode0[n2+1].setParent(nodeNb) - nodeNb++ - } - - // distribute weights (unlimited tree height) - huffNode[nodeRoot].setNbBits(0) - for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) - } - for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) - } - s.actualTableLog = s.setMaxHeight(int(nonNullRank)) - maxNbBits := s.actualTableLog - - // fill result into tree (val, nbBits) - if maxNbBits > tableLogMax { - return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) - } - var nbPerRank [tableLogMax + 1]uint16 - var valPerRank [16]uint16 - for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits()]++ - } - // determine stating value per rank - { - min := uint16(0) - for n := maxNbBits; n > 0; n-- { - // get starting value within each rank - valPerRank[n] = min - min += nbPerRank[n] - min >>= 1 - } - } - - // push nbBits per symbol, symbol order - for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol()].nBits = v.nbBits() - } - - // assign value within rank, symbol order - t := s.cTable[:s.symbolLen] - for n, val := range t { - nbits := val.nBits & 15 - v := valPerRank[nbits] - t[n].val = v - valPerRank[nbits] = v + 1 - } - - return nil -} - -// huffSort will sort symbols, decreasing order. -func (s *Scratch) huffSort() { - type rankPos struct { - base uint32 - current uint32 - } - - // Clear nodes - nodes := s.nodes[:huffNodesLen+1] - s.nodes = nodes - nodes = nodes[1 : huffNodesLen+1] - - // Sort into buckets based on length of symbol count. 
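// Sketch of the canonical code assignment at the end of buildCTable above:
// with per-symbol code lengths fixed, a starting value is computed per rank
// (valPerRank, longest codes first) and codes are then handed out in symbol
// order. Absent symbols (length 0) are not handled in this illustration.
package main

import "fmt"

func assignCodes(nbBits []uint8, maxNbBits uint8) []uint16 {
	nbPerRank := make([]uint16, maxNbBits+1)
	for _, n := range nbBits {
		nbPerRank[n]++
	}
	// Determine the starting value within each rank.
	valPerRank := make([]uint16, maxNbBits+1)
	min := uint16(0)
	for n := maxNbBits; n > 0; n-- {
		valPerRank[n] = min
		min += nbPerRank[n]
		min >>= 1
	}
	codes := make([]uint16, len(nbBits))
	for i, n := range nbBits {
		codes[i] = valPerRank[n]
		valPerRank[n]++
	}
	return codes
}

func main() {
	// Four symbols with code lengths 1, 2, 3 and 3 bits.
	fmt.Println(assignCodes([]uint8{1, 2, 3, 3}, 3))
}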
- var rank [32]rankPos - for _, v := range s.count[:s.symbolLen] { - r := highBit32(v+1) & 31 - rank[r].base++ - } - // maxBitLength is log2(BlockSizeMax) + 1 - const maxBitLength = 18 + 1 - for n := maxBitLength; n > 0; n-- { - rank[n-1].base += rank[n].base - } - for n := range rank[:maxBitLength] { - rank[n].current = rank[n].base - } - for n, c := range s.count[:s.symbolLen] { - r := (highBit32(c+1) + 1) & 31 - pos := rank[r].current - rank[r].current++ - prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count() { - nodes[pos&huffNodesMask] = prev - pos-- - prev = nodes[(pos-1)&huffNodesMask] - } - nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) - } -} - -func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { - maxNbBits := s.actualTableLog - huffNode := s.nodes[1 : huffNodesLen+1] - //huffNode = huffNode[: huffNodesLen] - - largestBits := huffNode[lastNonNull].nbBits() - - // early exit : no elt > maxNbBits - if largestBits <= maxNbBits { - return largestBits - } - totalCost := int(0) - baseCost := int(1) << (largestBits - maxNbBits) - n := uint32(lastNonNull) - - for huffNode[n].nbBits() > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) - huffNode[n].setNbBits(maxNbBits) - n-- - } - // n stops at huffNode[n].nbBits <= maxNbBits - - for huffNode[n].nbBits() == maxNbBits { - n-- - } - // n end at index of smallest symbol using < maxNbBits - - // renorm totalCost - totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ - - // repay normalized cost - { - const noSymbol = 0xF0F0F0F0 - var rankLast [tableLogMax + 2]uint32 - - for i := range rankLast[:] { - rankLast[i] = noSymbol - } - - // Get pos of last (smallest) symbol per rank - { - currentNbBits := maxNbBits - for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits() >= currentNbBits { - continue - } - currentNbBits = huffNode[pos].nbBits() // < maxNbBits - rankLast[maxNbBits-currentNbBits] = uint32(pos) - } - } - - for totalCost > 0 { - nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 - - for ; nBitsToDecrease > 1; nBitsToDecrease-- { - highPos := rankLast[nBitsToDecrease] - lowPos := rankLast[nBitsToDecrease-1] - if highPos == noSymbol { - continue - } - if lowPos == noSymbol { - break - } - highTotal := huffNode[highPos].count() - lowTotal := 2 * huffNode[lowPos].count() - if highTotal <= lowTotal { - break - } - } - // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
- // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary - // FIXME: try to remove - for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { - nBitsToDecrease++ - } - totalCost -= 1 << (nBitsToDecrease - 1) - if rankLast[nBitsToDecrease-1] == noSymbol { - // this rank is no longer empty - rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] - } - huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + - huffNode[rankLast[nBitsToDecrease]].nbBits()) - if rankLast[nBitsToDecrease] == 0 { - /* special case, reached largest symbol */ - rankLast[nBitsToDecrease] = noSymbol - } else { - rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { - rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ - } - } - } - - for totalCost < 0 { /* Sometimes, cost correction overshoot */ - if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits() == maxNbBits { - n-- - } - huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) - rankLast[1] = n + 1 - totalCost++ - continue - } - huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) - rankLast[1]++ - totalCost++ - } - } - return maxNbBits -} - -// A nodeElt is the fields -// -// count uint32 -// parent uint16 -// symbol byte -// nbBits uint8 -// -// in some order, all squashed into an integer so that the compiler -// always loads and stores entire nodeElts instead of separate fields. -type nodeElt uint64 - -func makeNodeElt(count uint32, symbol byte) nodeElt { - return nodeElt(count) | nodeElt(symbol)<<48 -} - -func (e *nodeElt) count() uint32 { return uint32(*e) } -func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } -func (e *nodeElt) symbol() byte { return byte(*e >> 48) } -func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } - -func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } -func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } -func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go deleted file mode 100644 index 54bd08b25c..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ /dev/null @@ -1,1167 +0,0 @@ -package huff0 - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/klauspost/compress/fse" -) - -type dTable struct { - single []dEntrySingle -} - -// single-symbols decoding -type dEntrySingle struct { - entry uint16 -} - -// Uses special code for all tables that are < 8 bits. -const use8BitTables = true - -// ReadTable will read a table from the input. -// The size of the input may be larger than the table definition. -// Any content remaining after the table definition will be returned. -// If no Scratch is provided a new one is allocated. -// The returned Scratch can be used for encoding or decoding input using this table. 
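// A minimal caller-side sketch (hedged: the input layout and names such as
// tableAndData and maxSize are illustrative, not part of this API):
//
//	s, data, err := huff0.ReadTable(tableAndData, nil)
//	if err != nil {
//		return nil, err
//	}
//	// The cap of dst bounds the decoded size (see Decompress1X below).
//	return s.Decoder().Decompress1X(make([]byte, 0, maxSize), data)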
-func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(nil) - if err != nil { - return s, nil, err - } - if len(in) <= 1 { - return s, nil, errors.New("input too small for table") - } - iSize := in[0] - in = in[1:] - if iSize >= 128 { - // Uncompressed - oSize := iSize - 127 - iSize = (oSize + 1) / 2 - if int(iSize) > len(in) { - return s, nil, errors.New("input too small for table") - } - for n := uint8(0); n < oSize; n += 2 { - v := in[n/2] - s.huffWeight[n] = v >> 4 - s.huffWeight[n+1] = v & 15 - } - s.symbolLen = uint16(oSize) - in = in[iSize:] - } else { - if len(in) < int(iSize) { - return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) - } - // FSE compressed weights - s.fse.DecompressLimit = 255 - hw := s.huffWeight[:] - s.fse.Out = hw - b, err := fse.Decompress(in[:iSize], s.fse) - s.fse.Out = nil - if err != nil { - return s, nil, fmt.Errorf("fse decompress returned: %w", err) - } - if len(b) > 255 { - return s, nil, errors.New("corrupt input: output table too large") - } - s.symbolLen = uint16(len(b)) - in = in[iSize:] - } - - // collect weight stats - var rankStats [16]uint32 - weightTotal := uint32(0) - for _, v := range s.huffWeight[:s.symbolLen] { - if v > tableLogMax { - return s, nil, errors.New("corrupt input: weight too large") - } - v2 := v & 15 - rankStats[v2]++ - // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. - weightTotal += (1 << v2) >> 1 - } - if weightTotal == 0 { - return s, nil, errors.New("corrupt input: weights zero") - } - - // get last non-null symbol weight (implied, total must be 2^n) - { - tableLog := highBit32(weightTotal) + 1 - if tableLog > tableLogMax { - return s, nil, errors.New("corrupt input: tableLog too big") - } - s.actualTableLog = uint8(tableLog) - // determine last weight - { - total := uint32(1) << tableLog - rest := total - weightTotal - verif := uint32(1) << highBit32(rest) - lastWeight := highBit32(rest) + 1 - if verif != rest { - // last value must be a clean power of 2 - return s, nil, errors.New("corrupt input: last value not power of two") - } - s.huffWeight[s.symbolLen] = uint8(lastWeight) - s.symbolLen++ - rankStats[lastWeight]++ - } - } - - if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { - // by construction : at least 2 elts of rank 1, must be even - return s, nil, errors.New("corrupt input: min elt size, even check failed ") - } - - // TODO: Choose between single/double symbol decoding - - // Calculate starting value for each rank - { - var nextRankStart uint32 - for n := uint8(1); n < s.actualTableLog+1; n++ { - current := nextRankStart - nextRankStart += rankStats[n] << (n - 1) - rankStats[n] = current - } - } - - // fill DTable (always full size) - tSize := 1 << tableLogMax - if len(s.dt.single) != tSize { - s.dt.single = make([]dEntrySingle, tSize) - } - cTable := s.prevTable - if cap(cTable) < maxSymbolValue+1 { - cTable = make([]cTableEntry, 0, maxSymbolValue+1) - } - cTable = cTable[:maxSymbolValue+1] - s.prevTable = cTable[:s.symbolLen] - s.prevTableLog = s.actualTableLog - - for n, w := range s.huffWeight[:s.symbolLen] { - if w == 0 { - cTable[n] = cTableEntry{ - val: 0, - nBits: 0, - } - continue - } - length := (uint32(1) << w) >> 1 - d := dEntrySingle{ - entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), - } - - rank := &rankStats[w] - cTable[n] = cTableEntry{ - val: uint16(*rank >> (w - 1)), - nBits: uint8(d.entry), - } - - single := s.dt.single[*rank : *rank+length] - for i := range single { - 
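// Each weight-w symbol owns length == 1<<(w-1) consecutive dTable slots, all
// carrying the same symbol and nBits == actualTableLog+1-w, which is what
// lets the decoder index the table directly with raw peeked bits.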
single[i] = d - } - *rank += length - } - - return s, in, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { - if cap(s.Out) < s.MaxDecodedSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:s.MaxDecodedSize] - s.Out, err = s.Decoder().Decompress1X(s.Out, in) - return s.Out, err -} - -// Decompress4X will decompress a 4X encoded stream. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// The length of the supplied input must match the end of a block exactly. -// The destination size of the uncompressed data must be known and provided. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { - if dstSize > s.MaxDecodedSize { - return nil, ErrMaxDecodedSizeExceeded - } - if cap(s.Out) < dstSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:dstSize] - s.Out, err = s.Decoder().Decompress4X(s.Out, in) - return s.Out, err -} - -// Decoder will return a stateless decoder that can be used by multiple -// decompressors concurrently. -// Before this is called, the table must be initialized with ReadTable. -// The Decoder is still linked to the scratch buffer so that cannot be reused. -// However, it is safe to discard the scratch. -func (s *Scratch) Decoder() *Decoder { - return &Decoder{ - dt: s.dt, - actualTableLog: s.actualTableLog, - bufs: &s.decPool, - } -} - -// Decoder provides stateless decoding. -type Decoder struct { - dt dTable - actualTableLog uint8 - bufs *sync.Pool -} - -func (d *Decoder) buffer() *[4][256]byte { - buf, ok := d.bufs.Get().(*[4][256]byte) - if ok { - return buf - } - return &[4][256]byte{} -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress1X8BitExactly(dst, src) - } - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - switch d.actualTableLog { - case 8: - const shift = 0 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
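// The flush above exploits off being a uint8: "off += 4" wraps to 0 after
// 256 decoded bytes, so the "if off == 0" branch fires exactly once per full
// temp buffer without a separate counter; the same pattern repeats in every
// case of this switch.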
- } - } - case 7: - const shift = 8 - 7 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 6: - const shift = 8 - 6 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 5: - const shift = 8 - 5 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 4: - const shift = 8 - 4 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 3: - const shift = 8 - 3 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 2: - const shift = 8 - 2 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 1: - const shift = 8 - 1 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - default: - d.bufs.Put(bufs) - return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - shift := (8 - d.actualTableLog) & 7 - - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()>>shift] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// decompress1X8BitExactly will decompress a 1X encoded stream with tablelog == 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - const shift = 56 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...)
- } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress4X8bitExactly(dst, src) - } - - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - shift := (56 + (8 - d.actualTableLog)) & 63 - - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
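// The four bitstreams are fully independent: src starts with a 6-byte jump
// table of three little-endian uint16 lengths (decoded above), so the two
// stream pairs below can be decoded in lockstep with no data dependencies
// between their table lookups.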
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining.
- remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - if br.finished() { - d.bufs.Put(buf) - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[uint8(br.value>>shift)].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - d.bufs.Put(buf) - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const shift = 56 - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - // copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. 
- remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - if br.finished() { - d.bufs.Put(buf) - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[br.peekByteFast()].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - d.bufs.Put(buf) - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// matches will compare a decoding table to a coding table. -// Errors are written to the writer. -// Nothing will be written if table is ok. -func (s *Scratch) matches(ct cTable, w io.Writer) { - if s == nil || len(s.dt.single) == 0 { - return - } - dt := s.dt.single[:1<<s.actualTableLog] - tablelog := s.actualTableLog - ok := 0 - broken := 0 - for sym, enc := range ct { - errs := 0 - broken++ - if enc.nBits == 0 { - for _, dec := range dt { - if uint8(dec.entry>>8) == byte(sym) { - fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym) - errs++ - break - } - } - if errs == 0 { - broken-- - } - continue - } - // Unused bits in input - ub := tablelog - enc.nBits - top := enc.val << ub - // decoder looks at top bits. - dec := dt[top] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 0 { - fmt.Fprintf(w, "%d errors in base, stopping\n", errs) - continue - } - // Ensure that all combinations are covered.
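// A code of length enc.nBits occupies 1<<(tablelog-enc.nBits) consecutive
// dTable slots, one per pattern of the unused low bits, so every alias must
// report the same symbol and bit count.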
- for i := uint16(0); i < (1 << ub); i++ { - vval := top | i - dec := dt[vval] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 20 { - fmt.Fprintf(w, "%d errors, stopping\n", errs) - break - } - } - if errs == 0 { - ok++ - broken-- - } - } - if broken > 0 { - fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) - } -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go deleted file mode 100644 index ba7e8e6b02..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ /dev/null @@ -1,226 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// This file contains the specialisation of Decoder.Decompress4X -// and Decoder.Decompress1X that use an asm implementation of their main loops. -package huff0 - -import ( - "errors" - "fmt" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -// decompress4x_main_loop_amd64 is an x86 assembler implementation -// of Decompress4X when tablelog > 8. - -//go:noescape -func decompress4x_main_loop_amd64(ctx *decompress4xContext) - -// decompress4x_8b_main_loop_amd64 is an x86 assembler implementation -// of Decompress4X when tablelog <= 8 which decodes 4 entries -// per loop. - -//go:noescape -func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) - -// fallback8BitSize is the size where using Go version is faster. -const fallback8BitSize = 800 - -type decompress4xContext struct { - pbr *[4]bitReaderShifted - peekBits uint8 - out *byte - dstEvery int - tbl *dEntrySingle - decoded int - limit *byte -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly.
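// A sketch of the calling convention (dec, block, and dstSize, the exact
// uncompressed size, are illustrative names):
//
//	out, err := dec.Decompress4X(make([]byte, 0, dstSize), block)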
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - - use8BitTables := d.actualTableLog <= 8 - if cap(dst) < fallback8BitSize && use8BitTables { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - var decoded int - - if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { - ctx := decompress4xContext{ - pbr: &br, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - out: &out[0], - dstEvery: dstEvery, - tbl: &single[0], - limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. - } - if use8BitTables { - decompress4x_8b_main_loop_amd64(&ctx) - } else { - decompress4x_main_loop_amd64(&ctx) - } - - decoded = ctx.decoded - out = out[decoded/4:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// decompress1x_main_loop_amd64 is an x86 assembler implementation -// of Decompress1X when tablelog > 8. - -//go:noescape -func decompress1x_main_loop_amd64(ctx *decompress1xContext) - -// decompress1x_main_loop_bmi2 is an x86 with BMI2 assembler implementation -// of Decompress1X when tablelog > 8. - -//go:noescape -func decompress1x_main_loop_bmi2(ctx *decompress1xContext) - -type decompress1xContext struct { - pbr *bitReaderShifted - peekBits uint8 - out *byte - outCap int - tbl *dEntrySingle - decoded int -} - -// Error reported by asm implementations -const error_max_decoded_size_exeeded = -1 - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly.
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:maxDecodedSize] - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - - if maxDecodedSize >= 4 { - ctx := decompress1xContext{ - pbr: &br, - out: &dst[0], - outCap: maxDecodedSize, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - tbl: &d.dt.single[0], - } - - if cpuinfo.HasBMI2() { - decompress1x_main_loop_bmi2(&ctx) - } else { - decompress1x_main_loop_amd64(&ctx) - } - if ctx.decoded == error_max_decoded_size_exeeded { - return nil, ErrMaxDecodedSizeExceeded - } - - dst = dst[:ctx.decoded] - } - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s deleted file mode 100644 index c4c7ab2d1f..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ /dev/null @@ -1,830 +0,0 @@ -// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. - -//go:build amd64 && !appengine && !noasm && gc - -// func decompress4x_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - // Preload values - MOVQ ctx+0(FP), AX - MOVBQZX 8(AX), DI - MOVQ 16(AX), BX - MOVQ 48(AX), SI - MOVQ 24(AX), R8 - MOVQ 32(AX), R9 - MOVQ (AX), R10 - - // Main loop -main_loop: - XORL DX, DX - CMPQ BX, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R10), R11 - MOVBQZX 40(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill0 - MOVQ 24(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ (R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 24(R10) - ORQ R13, R11 - - // exhausted += (br0.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br0.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX) - - // update the bitreader structure - MOVQ R11, 32(R10) - MOVB R12, 40(R10) - - // br1.fillFast32() - MOVQ 80(R10), R11 - MOVBQZX 88(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill1 - MOVQ 72(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 48(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 72(R10) - ORQ R13, R11 - - // exhausted += (br1.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - 
SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br1.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX)(R8*1) - - // update the bitreader structure - MOVQ R11, 80(R10) - MOVB R12, 88(R10) - - // br2.fillFast32() - MOVQ 128(R10), R11 - MOVBQZX 136(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill2 - MOVQ 120(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 96(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 120(R10) - ORQ R13, R11 - - // exhausted += (br2.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br2.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX)(R8*2) - - // update the bitreader structure - MOVQ R11, 128(R10) - MOVB R12, 136(R10) - - // br3.fillFast32() - MOVQ 176(R10), R11 - MOVBQZX 184(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill3 - MOVQ 168(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 144(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 168(R10) - ORQ R13, R11 - - // exhausted += (br3.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br3.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - LEAQ (R8)(R8*2), CX - MOVW AX, (BX)(CX*1) - - // update the bitreader structure - MOVQ R11, 176(R10) - MOVB R12, 184(R10) - ADDQ $0x02, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - // Preload values - MOVQ ctx+0(FP), CX - MOVBQZX 8(CX), DI - MOVQ 16(CX), BX - MOVQ 48(CX), SI - MOVQ 24(CX), R8 - MOVQ 32(CX), R9 - MOVQ (CX), R10 - - // Main loop -main_loop: - XORL DX, DX - CMPQ BX, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R10), R11 - MOVBQZX 40(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill0 - MOVQ 24(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ (R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 24(R10) - ORQ 
R14, R11 - - // exhausted += (br0.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX) - - // update the bitreader structure - MOVQ R11, 32(R10) - MOVB R12, 40(R10) - - // br1.fillFast32() - MOVQ 80(R10), R11 - MOVBQZX 88(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill1 - MOVQ 72(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 48(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 72(R10) - ORQ R14, R11 - - // exhausted += (br1.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX)(R8*1) - - // update the bitreader structure - MOVQ R11, 80(R10) - MOVB R12, 88(R10) - - // br2.fillFast32() - MOVQ 128(R10), R11 - MOVBQZX 136(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill2 - MOVQ 120(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 96(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 120(R10) - ORQ R14, R11 - - // exhausted += (br2.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - 
SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX)(R8*2) - - // update the bitreader structure - MOVQ R11, 128(R10) - MOVB R12, 136(R10) - - // br3.fillFast32() - MOVQ 176(R10), R11 - MOVBQZX 184(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill3 - MOVQ 168(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 144(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 168(R10) - ORQ R14, R11 - - // exhausted += (br3.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - LEAQ (R8)(R8*2), CX - MOVL AX, (BX)(CX*1) - - // update the bitreader structure - MOVQ R11, 176(R10) - MOVB R12, 184(R10) - ADDQ $0x04, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress1x_main_loop_amd64(ctx *decompress1xContext) -TEXT ·decompress1x_main_loop_amd64(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exceeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exceeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, 
R10 - -bitReader_fillFast_1_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - -bitReader_fillFast_2_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exceeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET - -// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) -// Requires: BMI2 -TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exceeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exceeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_1_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_2_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exceeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go deleted file mode 100644 index 908c17de63..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ /dev/null @@ -1,299 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// This file contains a generic implementation of Decoder.Decompress4X. -package huff0 - -import ( - "errors" - "fmt" -) - -// Decompress4X will decompress a 4X encoded stream. 
-// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. 
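// (After the unconditional bufoff-byte stores below, the regions at
// dstEvery, 2*dstEvery and 3*dstEvery must still be inside out, hence the
// three-buffers check.)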
- if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - //copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) 
- - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go deleted file mode 100644 index e8ad17ad08..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ /dev/null @@ -1,337 +0,0 @@ -// Package huff0 provides fast huffman encoding as used in zstd. -// -// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. -package huff0 - -import ( - "errors" - "fmt" - "math" - "math/bits" - "sync" - - "github.com/klauspost/compress/fse" -) - -const ( - maxSymbolValue = 255 - - // zstandard limits tablelog to 11, see: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description - tableLogMax = 11 - tableLogDefault = 11 - minTablelog = 5 - huffNodesLen = 512 - - // BlockSizeMax is maximum input size for a single block uncompressed. - BlockSizeMax = 1<<18 - 1 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. - ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") - - // ErrTooBig is return if input is too large for a single block. - ErrTooBig = errors.New("input too big") - - // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. - ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") -) - -type ReusePolicy uint8 - -const ( - // ReusePolicyAllow will allow reuse if it produces smaller output. - ReusePolicyAllow ReusePolicy = iota - - // ReusePolicyPrefer will re-use aggressively if possible. - // This will not check if a new table will produce smaller output, - // except if the current table is impossible to use or - // compressed output is bigger than input. - ReusePolicyPrefer - - // ReusePolicyNone will disable re-use of tables. - // This is slightly faster than ReusePolicyAllow but may produce larger output. - ReusePolicyNone - - // ReusePolicyMust must allow reuse and produce smaller output. - ReusePolicyMust -) - -type Scratch struct { - count [maxSymbolValue + 1]uint32 - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. 
- Out []byte - - // OutTable will contain the table data only, if a new table has been generated. - // Slice of the returned data. - OutTable []byte - - // OutData will contain the compressed data. - // Slice of the returned data. - OutData []byte - - // MaxDecodedSize will set the maximum allowed output size. - // This value will automatically be set to BlockSizeMax if not set. - // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. - MaxDecodedSize int - - br byteReader - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - // Must be <= 11 and >= 5. - TableLog uint8 - - // Reuse will specify the reuse policy - Reuse ReusePolicy - - // WantLogLess allows to specify a log 2 reduction that should at least be achieved, - // otherwise the block will be returned as incompressible. - // The reduction should then at least be (input size >> WantLogLess) - // If WantLogLess == 0 any improvement will do. - WantLogLess uint8 - - symbolLen uint16 // Length of active part of the symbol table. - maxCount int // count of the most probable symbol - clearCount bool // clear count - actualTableLog uint8 // Selected tablelog. - prevTableLog uint8 // Tablelog for previous table - prevTable cTable // Table used for previous compression. - cTable cTable // compression table - dt dTable // decompression table - nodes []nodeElt - tmpOut [4][]byte - fse *fse.Scratch - decPool sync.Pool // *[4][256]byte buffers. - huffWeight [maxSymbolValue + 1]byte -} - -// TransferCTable will transfer the previously used compression table. -func (s *Scratch) TransferCTable(src *Scratch) { - if cap(s.prevTable) < len(src.prevTable) { - s.prevTable = make(cTable, 0, maxSymbolValue+1) - } - s.prevTable = s.prevTable[:len(src.prevTable)] - copy(s.prevTable, src.prevTable) - s.prevTableLog = src.prevTableLog -} - -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if len(in) > BlockSizeMax { - return nil, ErrTooBig - } - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = maxSymbolValue - } - if s.TableLog == 0 { - s.TableLog = tableLogDefault - } - if s.TableLog > tableLogMax || s.TableLog < minTablelog { - return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) - } - if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { - s.MaxDecodedSize = BlockSizeMax - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - s.Out = s.Out[:0] - - s.OutTable = nil - s.OutData = nil - if cap(s.nodes) < huffNodesLen+1 { - s.nodes = make([]nodeElt, 0, huffNodesLen+1) - } - s.nodes = s.nodes[:0] - if s.fse == nil { - s.fse = &fse.Scratch{} - } - s.br.init(in) - - return s, nil -} - -type cTable []cTableEntry - -func (c cTable) write(s *Scratch) error { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. 
- hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - s.Out = append(s.Out, uint8(len(b))) - s.Out = append(s.Out, b...) - return nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return ErrIncompressible - } - op := s.Out - // special case, pack weights 4 bits/weight. - op = append(op, 128|(maxSymbolValue-1)) - // be sure it doesn't cause msan issue in final combination - huffWeight[maxSymbolValue] = 0 - for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { - op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) - } - s.Out = op - return nil -} - -func (c cTable) estTableSize(s *Scratch) (sz int, err error) { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. - hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - sz += 1 + len(b) - return sz, nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return 0, ErrIncompressible - } - // special case, pack weights 4 bits/weight. - sz += 1 + int(maxSymbolValue/2) - return sz, nil -} - -// estimateSize returns the estimated size in bytes of the input represented in the -// histogram supplied. -func (c cTable) estimateSize(hist []uint32) int { - nbBits := uint32(7) - for i, v := range c[:len(hist)] { - nbBits += uint32(v.nBits) * hist[i] - } - return int(nbBits >> 3) -} - -// minSize returns the minimum possible size considering the shannon limit. 
-func (s *Scratch) minSize(total int) int { - nbBits := float64(7) - fTotal := float64(total) - for _, v := range s.count[:s.symbolLen] { - n := float64(v) - if n > 0 { - nbBits += math.Log2(fTotal/n) * n - } - } - return int(nbBits) >> 3 -} - -func highBit32(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go deleted file mode 100644 index 3954c51219..0000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -// Package cpuinfo gives runtime info about the current CPU. -// -// This is a very limited module meant for use internally -// in this project. For more versatile solution check -// https://github.com/klauspost/cpuid. -package cpuinfo - -// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. -func HasBMI1() bool { - return hasBMI1 -} - -// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. -func HasBMI2() bool { - return hasBMI2 -} - -// DisableBMI2 will disable BMI2, for testing purposes. -// Call returned function to restore previous state. -func DisableBMI2() func() { - old := hasBMI2 - hasBMI2 = false - return func() { - hasBMI2 = old - } -} - -// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. -func HasBMI() bool { - return HasBMI1() && HasBMI2() -} - -var hasBMI1 bool -var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go deleted file mode 100644 index e802579c4f..0000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package cpuinfo - -// go:noescape -func x86extensions() (bmi1, bmi2 bool) - -func init() { - hasBMI1, hasBMI2 = x86extensions() -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s deleted file mode 100644 index 4465fbe9e9..0000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s +++ /dev/null @@ -1,36 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" -#include "funcdata.h" -#include "go_asm.h" - -TEXT ·x86extensions(SB), NOSPLIT, $0 - // 1. determine max EAX value - XORQ AX, AX - CPUID - - CMPQ AX, $7 - JB unsupported - - // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" - MOVQ $7, AX - MOVQ $0, CX - CPUID - - BTQ $3, BX // bit 3 = BMI1 - SETCS AL - - BTQ $8, BX // bit 8 = BMI2 - SETCS AH - - MOVB AL, bmi1+0(FP) - MOVB AH, bmi2+1(FP) - RET - -unsupported: - XORQ AX, AX - MOVB AL, bmi1+0(FP) - MOVB AL, bmi2+1(FP) - RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE deleted file mode 100644 index 6050c10f4c..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go deleted file mode 100644 index 40796a49d6..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. 
-func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. 
- n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. -func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go deleted file mode 100644 index 77395a6b8b..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go deleted file mode 100644 index 13c6040a5d..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. 
- d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
- // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. 
- n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go deleted file mode 100644 index 2aa6a95a02..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. 
-// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// EncodeBlockInto exposes encodeBlock but checks dst size. -func EncodeBlockInto(dst, src []byte) (d int) { - if MaxEncodedLen(len(src)) > len(dst) { - return 0 - } - - // encodeBlock breaks on too big blocks, so split. - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return d -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. 
- tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. 
If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go deleted file mode 100644 index 34d01f4aa6..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snapref implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snapref - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. 
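-
-For example, a first byte of 0xf0 has l == 0 and m == 60, so the next
-m - 59 == 1 byte holds n; if that byte is 0x14 (20), then the 1 + n == 21
-bytes after it are literal bytes. A first byte of 0x6a has l == 2 and
-m == 26: a copy of length 1 + m == 27 bytes, with the offset given by the
-next 2 bytes, little-endian.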
-*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod deleted file mode 100644 index 2263853fca..0000000000 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ /dev/null @@ -1,4 +0,0 @@ -module github.com/klauspost/compress - -go 1.16 - diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md deleted file mode 100644 index bdd49c8b25..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ /dev/null @@ -1,441 +0,0 @@ -# zstd - -[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. -It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. -A high performance compression algorithm is implemented. For now focused on speed. - -This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. - -This package is pure Go and without use of "unsafe". - -The `zstd` package is provided as open source software using a Go standard license. - -Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. - -For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). - -## Installation - -Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. 
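-
-As a quick sanity check of an install, the following minimal sketch round-trips
-a short buffer in memory. It uses only `EncodeAll`/`DecodeAll` (both covered in
-more detail later in this document) and passes a nil Writer/Reader since no
-streaming is involved; error handling is trimmed for brevity.
-
-```Go
-package main
-
-import (
-	"bytes"
-	"fmt"
-
-	"github.com/klauspost/compress/zstd"
-)
-
-func main() {
-	// A nil output/input is fine when only EncodeAll/DecodeAll are used.
-	enc, _ := zstd.NewWriter(nil)
-	defer enc.Close()
-	dec, _ := zstd.NewReader(nil)
-	defer dec.Close()
-
-	src := []byte("hello, zstd")
-	compressed := enc.EncodeAll(src, nil)
-	decompressed, err := dec.DecodeAll(compressed, nil)
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println(bytes.Equal(src, decompressed)) // true
-}
-```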
-
-[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd)
-
-## Compressor
-
-### Status:
-
-STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library is actively
-used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates.
-
-There may still be specific combinations of data types/size/settings that could lead to edge cases,
-so as always, testing is recommended.
-
-For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
-
-* The "Fastest" compression ratio is roughly equivalent to zstd level 1.
-* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
-* The "Better" compression ratio is roughly equivalent to zstd level 7.
-* The "Best" compression ratio is roughly equivalent to zstd level 11.
-
-In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
-The compression ratio compared to stdlib is around level 3, but it is usually 3x as fast.
-
-### Usage
-
-An Encoder can be used either for compressing a stream, via the
-`io.WriteCloser` interface it supports, or for multiple independent
-tasks, via the `EncodeAll` function.
-Smaller encodes are encouraged to use the `EncodeAll` function.
-Use `NewWriter` to create a new instance that can be used for both.
-
-To create a writer with default options:
-
-```Go
-// Compress input to output.
-func Compress(in io.Reader, out io.Writer) error {
-	enc, err := zstd.NewWriter(out)
-	if err != nil {
-		return err
-	}
-	_, err = io.Copy(enc, in)
-	if err != nil {
-		enc.Close()
-		return err
-	}
-	return enc.Close()
-}
-```
-
-Now you can encode by writing data to `enc`. The output will be finished when `Close()` is called.
-Even if your encode fails, you should still call `Close()` to release any resources that may be held.
-
-The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
-
-To reuse the encoder, use the `Reset(io.Writer)` function to change to another output.
-This allows the encoder to reuse all resources and avoid wasteful allocations.
-
-Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
-of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change
-in the future. So if you want to limit concurrency for future updates, specify the concurrency
-you would like.
-
-If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`,
-which will compress input as each block is completed, blocking on writes until each has completed.
-
-You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined
-compression settings can be specified.
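-
-Putting the options from this section together, the sketch below is a variant
-of the `Compress` example above that selects a predefined level and disables
-async goroutines. It assumes the predefined `zstd.SpeedBetterCompression`
-level; imports mirror the example above.
-
-```Go
-// CompressBetter is like Compress, but trades speed for ratio and
-// compresses each block inline rather than on background goroutines.
-func CompressBetter(in io.Reader, out io.Writer) error {
-	enc, err := zstd.NewWriter(out,
-		zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
-		zstd.WithEncoderConcurrency(1),
-	)
-	if err != nil {
-		return err
-	}
-	if _, err := io.Copy(enc, in); err != nil {
-		enc.Close()
-		return err
-	}
-	return enc.Close()
-}
-```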
-However, there may be modes in the future that break this,
-although they will not be enabled without an explicit option.
-
-This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
-
-Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
-[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
-and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
-
-#### Blocks
-
-For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
-
-`EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently.
-Each call will only run on the same goroutine as the caller.
-
-Encoded blocks can be concatenated and the result will be the combined input stream.
-Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
-
-Especially when encoding blocks you should take special care to reuse the encoder.
-This will effectively make it run without allocations after a warmup period.
-To make it run completely without allocations, supply a destination buffer with space for all content.
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-// Create a writer that caches compressors.
-// For this operation type we supply a nil Writer.
-var encoder, _ = zstd.NewWriter(nil)
-
-// Compress a buffer.
-// If you have a destination buffer, the allocation in the call can also be eliminated.
-func Compress(src []byte) []byte {
-	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
-}
-```
-
-You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
-option when creating the writer.
-
-Using the Encoder for both a stream and individual blocks concurrently is safe.
-
-### Performance
-
-I have collected some benchmark examples comparing speed and compression against other compressors.
-
-* `file` is the input file.
-* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
-* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", level 3 is "better", and level 4 is "best".
-* `insize`/`outsize` is the input/output size.
-* `millis` is the number of milliseconds used for compression.
-* `mb/s` is megabytes (2^20 bytes) per second.
-
-```
-Silesia Corpus:
-http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
-
-This package:
-file out level insize outsize millis mb/s
-silesia.tar zskp 1 211947520 73821326 634 318.47
-silesia.tar zskp 2 211947520 67655404 1508 133.96
-silesia.tar zskp 3 211947520 64746933 3000 67.37
-silesia.tar zskp 4 211947520 60073508 16926 11.94
-
-cgo zstd:
-silesia.tar zstd 1 211947520 73605392 543 371.56
-silesia.tar zstd 3 211947520 66793289 864 233.68
-silesia.tar zstd 6 211947520 62916450 1913 105.66
-silesia.tar zstd 9 211947520 60212393 5063 39.92
-
-gzip, stdlib/this package:
-silesia.tar gzstd 1 211947520 80007735 1498 134.87
-silesia.tar gzkp 1 211947520 80088272 1009 200.31
-
-GOB stream of binary data. Highly compressible.
-https://files.klauspost.com/compress/gob-stream.7z - -file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 233948096 3230 564.34 -gob-stream zskp 2 1911399616 203997694 4997 364.73 -gob-stream zskp 3 1911399616 173526523 13435 135.68 -gob-stream zskp 4 1911399616 162195235 47559 38.33 - -gob-stream zstd 1 1911399616 249810424 2637 691.26 -gob-stream zstd 3 1911399616 208192146 3490 522.31 -gob-stream zstd 6 1911399616 193632038 6687 272.56 -gob-stream zstd 9 1911399616 177620386 16175 112.70 - -gob-stream gzstd 1 1911399616 357382013 9046 201.49 -gob-stream gzkp 1 1911399616 359136669 4885 373.08 - -The test data for the Large Text Compression Benchmark is the first -10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. -http://mattmahoney.net/dc/textdata.html - -file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343833605 3687 258.64 -enwik9 zskp 2 1000000000 317001237 7672 124.29 -enwik9 zskp 3 1000000000 291915823 15923 59.89 -enwik9 zskp 4 1000000000 261710291 77697 12.27 - -enwik9 zstd 1 1000000000 358072021 3110 306.65 -enwik9 zstd 3 1000000000 313734672 4784 199.35 -enwik9 zstd 6 1000000000 295138875 10290 92.68 -enwik9 zstd 9 1000000000 278348700 28549 33.40 - -enwik9 gzstd 1 1000000000 382578136 8608 110.78 -enwik9 gzkp 1 1000000000 382781160 5628 169.45 - -Highly compressible JSON file. -https://files.klauspost.com/compress/github-june-2days-2019.json.zst - -file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 -github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 -github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 -github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 - -github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 -github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 -github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 -github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 - -github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 -github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 - -VM Image, Linux mint with a few installed applications: -https://files.klauspost.com/compress/rawstudio-mint14.7z - -file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 -rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 -rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 -rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 - -rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 -rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 -rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 -rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 - -rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 -rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 - -CSV data: -https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst - -file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 -nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 -nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 -nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 - -nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 -nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 -nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 
-nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
-
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
-nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
-```
-
-## Decompressor
-
-Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
-
-This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
-kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
-The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
-or run it past its limits with ANY input provided.
-
-### Usage
-
-The package has been designed for two main use cases: big streams of data and smaller in-memory buffers.
-Both are accessed by creating a `Decoder`.
-
-For streaming use, a simple setup could look like this:
-
-```Go
-import (
-	"io"
-
-	"github.com/klauspost/compress/zstd"
-)
-
-func Decompress(in io.Reader, out io.Writer) error {
-	d, err := zstd.NewReader(in)
-	if err != nil {
-		return err
-	}
-	defer d.Close()
-
-	// Copy content...
-	_, err = io.Copy(out, d)
-	return err
-}
-```
-
-When running with default settings, it is important to call "Close" once you no longer need the Reader,
-so that its goroutines are stopped.
-Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
-
-Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
-However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
-only as it is requested.
-
-For decoding buffers, it could look something like this:
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-// Create a reader that caches decompressors.
-// For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
-
-// Decompress a buffer. We don't supply a destination buffer,
-// so it will be allocated by the decoder.
-func Decompress(src []byte) ([]byte, error) {
-	return decoder.DecodeAll(src, nil)
-}
-```
-
-Both of these cases should provide the functionality needed.
-The decoder can be used for *concurrent* decompression of multiple buffers.
-By default 4 decompressors will be created.
-
-It will only allow a certain number of concurrent operations to run.
-To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
-It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
-
-### Dictionaries
-
-Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
-
-Dictionaries are added individually to Decoders.
-Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
-To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
-Several dictionaries can be added at once.
-
-The dictionary will be used automatically for the data that specifies it.
-A re-used Decoder will still contain the dictionaries registered.
-
-When registering multiple dictionaries with the same ID, the last one will be used.
-
-It is possible to use dictionaries when compressing data.
-
-To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used,
-and it will likely be used even if it doesn't improve compression.
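A minimal sketch of wiring up the two dictionary options quoted above; `dict` is assumed to hold a dictionary produced by `zstd --train`:

```Go
import "github.com/klauspost/compress/zstd"

// roundTripWithDict compresses src using dict and decompresses it again.
// dict is assumed to be a dictionary built by `zstd --train`.
func roundTripWithDict(src, dict []byte) ([]byte, error) {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		return nil, err
	}
	defer enc.Close()

	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dict))
	if err != nil {
		return nil, err
	}
	defer dec.Close()

	compressed := enc.EncodeAll(src, nil)
	// The decoder selects the registered dictionary whose ID the frame specifies.
	return dec.DecodeAll(compressed, nil)
}
```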
-
-The same dictionary must be used to decompress the content.
-
-For any real gains, the dictionary should be built with similar data.
-If an unsuitable dictionary is used, the output may be slightly larger than with no dictionary.
-Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
-For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
-
-For now there is a fixed startup performance penalty for compressing content with dictionaries.
-This will likely be improved over time. Just be sure to test performance when implementing.
-
-### Allocation-less operation
-
-The decoder has been designed to operate without allocations after a warmup.
-
-This means that you should *store* the decoder for best performance.
-To re-use a stream decoder, use the `Reset(r io.Reader) error` function to switch to another stream.
-A decoder can safely be re-used even if the previous stream failed.
-
-To release the resources, you must call the `Close()` function on a decoder.
-After this it can *no longer be reused*, but all running goroutines will be stopped.
-So you *must* call it once you no longer need the Reader.
-
-For decompressing smaller buffers a single decoder can be used.
-When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
-In this case no unneeded allocations should be made.
-
-### Concurrency
-
-The buffer decoder does everything on the same goroutine and does nothing concurrently.
-It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
-
-The stream decoder will create goroutines that handle:
-
-1) Reading input and splitting it into blocks.
-2) Decompression of literals.
-3) Decompression of sequences.
-4) Reconstruction of the output stream.
-
-So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
-
-The concurrency level will, for streams, determine how many blocks ahead the decoding will start.
-
-Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
-
-In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
-
-### Benchmarks
-
-The first two are streaming decodes; the rest are smaller inputs.
-
-Running on an AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
-
-```
-BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
-BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
-
-Concurrent blocks, performance:
-
-BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
-```
-
-This reflects the performance around May 2022, but may be out of date.
-
-## Zstd inside ZIP files
-
-It is possible to use zstandard to compress individual files inside zip archives.
-While this isn't widely supported, it can be useful for internal files.
-
-To support the compression and decompression of these files, you must register a compressor and decompressor.
-
-It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT
-to use the global registration functions. The main reason for this is that two registrations from
-different packages will result in a panic.
-
-It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip
-files concurrently, and using a single instance will allow reusing some resources.
-
-See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for
-how to compress and decompress files inside zip archives.
-
-# Contributions
-
-Contributions are always welcome.
-For new features/fixes, remember to add tests, and for performance enhancements include benchmarks.
-
-For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
-
-This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
deleted file mode 100644
index 25ca983941..0000000000
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-// Based on work by Yann Collet, released under BSD License.
- -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math/bits" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - value uint64 // Maybe use [16]byte, but shifting is awkward. - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) int { - if n == 0 /*|| b.bitsRead >= 64 */ { - return 0 - } - return int(b.get32BitsFast(n)) -} - -// get32BitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) get32BitsFast(n uint8) uint32 { - const regMask = 64 - 1 - v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - v := b.in[len(b.in)-8:] - b.in = b.in[:len(b.in)-8] - b.value = binary.LittleEndian.Uint64(v) - b.bitsRead = 0 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if len(b.in) >= 4 { - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - return - } - - b.bitsRead -= uint8(8 * len(b.in)) - for len(b.in) > 0 { - b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) - b.in = b.in[:len(b.in)-1] - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return len(b.in) == 0 && b.bitsRead >= 64 -} - -// overread returns true if more bits have been requested than is on the stream. -func (b *bitReader) overread() bool { - return b.bitsRead > 64 -} - -// remain returns the number of bits remaining. -func (b *bitReader) remain() uint { - return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. 
- b.in = nil - if !b.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go deleted file mode 100644 index 1952f175b0..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package zstd - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits32NC will add up to 31 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits32NC(value uint32, bits uint8) { - b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits64NC will add up to 64 bits. -// There must be space for 32 bits. -func (b *bitWriter) addBits64NC(value uint64, bits uint8) { - if bits <= 31 { - b.addBits32Clean(uint32(value), bits) - return - } - b.addBits32Clean(uint32(value), 32) - b.flush32() - b.addBits32Clean(uint32(value>>32), bits-32) -} - -// addBits32Clean will add up to 32 bits. -// It will not check if there is space for them. -// The input must not contain more bits than specified. -func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. 
-func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go deleted file mode 100644 index 9f17ce601f..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ /dev/null @@ -1,726 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "hash/crc32" - "io" - "os" - "path/filepath" - "sync" - - "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type blockType uint8 - -//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex - -const ( - blockTypeRaw blockType = iota - blockTypeRLE - blockTypeCompressed - blockTypeReserved -) - -type literalsBlockType uint8 - -const ( - literalsBlockRaw literalsBlockType = iota - literalsBlockRLE - literalsBlockCompressed - literalsBlockTreeless -) - -const ( - // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) - maxCompressedBlockSize = 128 << 10 - - compressedBlockOverAlloc = 16 - maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc - - // Maximum possible block size (all Raw+Uncompressed). - maxBlockSize = (1 << 21) - 1 - - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff - - // We support slightly less than the reference decoder to be able to - // use ints on 32 bit archs. - maxOffsetBits = 30 -) - -var ( - huffDecoderPool = sync.Pool{New: func() interface{} { - return &huff0.Scratch{} - }} - - fseDecoderPool = sync.Pool{New: func() interface{} { - return &fseDecoder{} - }} -) - -type blockDec struct { - // Raw source data of the block. - data []byte - dataStorage []byte - - // Destination of the decoded data. - dst []byte - - // Buffer for literals data. - literalBuf []byte - - // Window size of the block. - WindowSize uint64 - - err error - - // Check against this crc, if hasCRC is true. - checkCRC uint32 - hasCRC bool - - // Frame to use for singlethreaded decoding. - // Should not be used by the decoder itself since parent may be another frame. - localFrame *frameDec - - sequence []seqVals - - async struct { - newHist *history - literals []byte - seqData []byte - seqSize int // Size of uncompressed sequences - fcs uint64 - } - - // Block is RLE, this is the size. - RLESize uint32 - - Type blockType - - // Is this the last block of a frame? - Last bool - - // Use less memory - lowMem bool -} - -func (b *blockDec) String() string { - if b == nil { - return "" - } - return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) -} - -func newBlockDec(lowMem bool) *blockDec { - b := blockDec{ - lowMem: lowMem, - } - return &b -} - -// reset will reset the block. 
-// Input must be a start of a block and will be at the end of the block when returned. -func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { - b.WindowSize = windowSize - tmp, err := br.readSmall(3) - if err != nil { - println("Reading block header:", err) - return err - } - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - b.Last = bh&1 != 0 - b.Type = blockType((bh >> 1) & 3) - // find size. - cSize := int(bh >> 3) - maxSize := maxCompressedBlockSizeAlloc - switch b.Type { - case blockTypeReserved: - return ErrReservedBlockType - case blockTypeRLE: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - b.RLESize = uint32(cSize) - if b.lowMem { - maxSize = cSize - } - cSize = 1 - case blockTypeCompressed: - if debugDecoder { - println("Data size on stream:", cSize) - } - b.RLESize = 0 - maxSize = maxCompressedBlockSizeAlloc - if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + compressedBlockOverAlloc - } - if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debugDecoder { - printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrCompressedSizeTooBig - } - // Empty compressed blocks must at least be 2 bytes - // for Literals_Block_Type and one for Sequences_Section_Header. - if cSize < 2 { - return ErrBlockTooSmall - } - case blockTypeRaw: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - - b.RLESize = 0 - // We do not need a destination for raw blocks. - maxSize = -1 - default: - panic("Invalid block type") - } - - // Read block data. - if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { - // byteBuf doesn't need a destination buffer. - if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) - } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) - } - } - b.data, err = br.readBig(cSize, b.dataStorage) - if err != nil { - if debugDecoder { - println("Reading block:", err, "(", cSize, ")", len(b.data)) - printf("%T", br) - } - return err - } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } - return nil -} - -// sendEOF will make the decoder send EOF on this frame. -func (b *blockDec) sendErr(err error) { - b.Last = true - b.Type = blockTypeReserved - b.err = err -} - -// Close will release resources. -// Closed blockDec cannot be reset. 
-func (b *blockDec) Close() { -} - -// decodeBuf -func (b *blockDec) decodeBuf(hist *history) error { - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxCompressedBlockSize) - } - } - b.dst = b.dst[:b.RLESize] - v := b.data[0] - for i := range b.dst { - b.dst[i] = v - } - hist.appendKeep(b.dst) - return nil - case blockTypeRaw: - hist.appendKeep(b.data) - return nil - case blockTypeCompressed: - saved := b.dst - // Append directly to history - if hist.ignoreBuffer == 0 { - b.dst = hist.b - hist.b = nil - } else { - b.dst = b.dst[:0] - } - err := b.decodeCompressed(hist) - if debugDecoder { - println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) - } - if hist.ignoreBuffer == 0 { - hist.b = b.dst - b.dst = saved - } else { - hist.appendKeep(b.dst) - } - return err - case blockTypeReserved: - // Used for returning errors. - return b.err - default: - panic("Invalid block type") - } -} - -func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { - // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header - if len(in) < 2 { - return in, ErrBlockTooSmall - } - - litType := literalsBlockType(in[0] & 3) - var litRegenSize int - var litCompSize int - sizeFormat := (in[0] >> 2) & 3 - var fourStreams bool - var literals []byte - switch litType { - case literalsBlockRaw, literalsBlockRLE: - switch sizeFormat { - case 0, 2: - // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. - litRegenSize = int(in[0] >> 3) - in = in[1:] - case 1: - // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) - in = in[2:] - case 3: - // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. - if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) - in = in[3:] - } - case literalsBlockCompressed, literalsBlockTreeless: - switch sizeFormat { - case 0, 1: - // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
- if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) - litRegenSize = int(n & 1023) - litCompSize = int(n >> 10) - fourStreams = sizeFormat == 1 - in = in[3:] - case 2: - fourStreams = true - if len(in) < 4 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) - litRegenSize = int(n & 16383) - litCompSize = int(n >> 14) - in = in[4:] - case 3: - fourStreams = true - if len(in) < 5 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) - litRegenSize = int(n & 262143) - litCompSize = int(n >> 18) - in = in[5:] - } - } - if debugDecoder { - println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) - } - if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { - return in, ErrWindowSizeExceeded - } - - switch litType { - case literalsBlockRaw: - if len(in) < litRegenSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return in, ErrBlockTooSmall - } - literals = in[:litRegenSize] - in = in[litRegenSize:] - //printf("Found %d uncompressed literals\n", litRegenSize) - case literalsBlockRLE: - if len(in) < 1 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return in, ErrBlockTooSmall - } - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - literals = b.literalBuf[:litRegenSize] - v := in[0] - for i := range literals { - literals[i] = v - } - in = in[1:] - if debugDecoder { - printf("Found %d RLE compressed literals\n", litRegenSize) - } - case literalsBlockTreeless: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - // Store compressed literals, so we defer decoding until we get history. - literals = in[:litCompSize] - in = in[litCompSize:] - if debugDecoder { - printf("Found %d compressed literals\n", litCompSize) - } - huff := hist.huffTree - if huff == nil { - return in, errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - var err error - // Use our out buffer. 
- huff.MaxDecodedSize = litRegenSize - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return in, err - } - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - - case literalsBlockCompressed: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - literals = in[:litCompSize] - in = in[litCompSize:] - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - huff := hist.huffTree - if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { - huff = huffDecoderPool.Get().(*huff0.Scratch) - if huff == nil { - huff = &huff0.Scratch{} - } - } - var err error - if debugDecoder { - println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) - } - huff, literals, err = huff0.ReadTable(literals, huff) - if err != nil { - println("reading huffman table:", err) - return in, err - } - hist.huffTree = huff - huff.MaxDecodedSize = litRegenSize - // Use our out buffer. - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - if err != nil { - println("decoding compressed literals:", err) - return in, err - } - // Make sure we don't leak our literals buffer - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - // Re-cap to get extra size. - literals = b.literalBuf[:len(literals)] - if debugDecoder { - printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) - } - } - hist.decoders.literals = literals - return in, nil -} - -// decodeCompressed will start decompressing a block. -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - in, err := b.decodeLiterals(in, hist) - if err != nil { - return err - } - err = b.prepareSequences(in, hist) - if err != nil { - return err - } - if hist.decoders.nSeqs == 0 { - b.dst = append(b.dst, hist.decoders.literals...) 
- return nil - } - before := len(hist.decoders.out) - err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) - if err != nil { - return err - } - if hist.decoders.maxSyncLen > 0 { - hist.decoders.maxSyncLen += uint64(before) - hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) - } - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - return nil -} - -func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { - if debugDecoder { - printf("prepareSequences: %d byte(s) input\n", len(in)) - } - // Decode Sequences - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section - if len(in) < 1 { - return ErrBlockTooSmall - } - var nSeqs int - seqHeader := in[0] - switch { - case seqHeader < 128: - nSeqs = int(seqHeader) - in = in[1:] - case seqHeader < 255: - if len(in) < 2 { - return ErrBlockTooSmall - } - nSeqs = int(seqHeader-128)<<8 | int(in[1]) - in = in[2:] - case seqHeader == 255: - if len(in) < 3 { - return ErrBlockTooSmall - } - nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) - in = in[3:] - } - if nSeqs == 0 && len(in) != 0 { - // When no sequences, there should not be any more data... - if debugDecoder { - printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) - } - return ErrUnexpectedBlockSize - } - - var seqs = &hist.decoders - seqs.nSeqs = nSeqs - if nSeqs > 0 { - if len(in) < 1 { - return ErrBlockTooSmall - } - br := byteReader{b: in, off: 0} - compMode := br.Uint8() - br.advance(1) - if debugDecoder { - printf("Compression modes: 0b%b", compMode) - } - for i := uint(0); i < 3; i++ { - mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debugDecoder { - println("Table", tableIndex(i), "is", mode) - } - var seq *sequenceDec - switch tableIndex(i) { - case tableLiteralLengths: - seq = &seqs.litLengths - case tableOffsets: - seq = &seqs.offsets - case tableMatchLengths: - seq = &seqs.matchLengths - default: - panic("unknown table") - } - switch mode { - case compModePredefined: - if seq.fse != nil && !seq.fse.preDefined { - fseDecoderPool.Put(seq.fse) - } - seq.fse = &fsePredef[i] - case compModeRLE: - if br.remain() < 1 { - return ErrBlockTooSmall - } - v := br.Uint8() - br.advance(1) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - symb, err := decSymbolValue(v, symbolTableX[i]) - if err != nil { - printf("RLE Transform table (%v) error: %v", tableIndex(i), err) - return err - } - seq.fse.setRLE(symb) - if debugDecoder { - printf("RLE set to 0x%x, code: %v", symb, v) - } - case compModeFSE: - println("Reading table for", tableIndex(i)) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) - if err != nil { - println("Read table error:", err) - return err - } - err = seq.fse.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder { - println("Read table ok", "symbolLen:", seq.fse.symbolLen) - } - case compModeRepeat: - seq.repeat = true - } - if br.overread() { - return io.ErrUnexpectedEOF - } - } - in = br.unread() - } - if debugDecoder { - println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") - } - - if nSeqs == 0 { - if len(b.sequence) > 0 { - b.sequence = b.sequence[:0] - } - return nil - } - br := seqs.br - if br == nil { - br = &bitReader{} - } - if err := br.init(in); err != nil { - 
return err - } - - if err := seqs.initialize(br, hist, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - // Extract blocks... - if false && hist.dict == nil { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) - var buf bytes.Buffer - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) - buf.Write(in) - os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) - } - - return nil -} - -func (b *blockDec) decodeSequences(hist *history) error { - if cap(b.sequence) < hist.decoders.nSeqs { - if b.lowMem { - b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) - } else { - b.sequence = make([]seqVals, 0, 0x7F00+0xffff) - } - } - b.sequence = b.sequence[:hist.decoders.nSeqs] - if hist.decoders.nSeqs == 0 { - hist.decoders.seqSize = len(hist.decoders.literals) - return nil - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.prevOffset = hist.recentOffsets - - err := hist.decoders.decode(b.sequence) - hist.recentOffsets = hist.decoders.prevOffset - return err -} - -func (b *blockDec) executeSequences(hist *history) error { - hbytes := hist.b - if len(hbytes) > hist.windowSize { - hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history anymore. - if hist.dict != nil { - hist.dict.content = nil - } - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.out = b.dst[:0] - err := hist.decoders.execute(b.sequence, hbytes) - if err != nil { - return err - } - return b.updateHistory(hist) -} - -func (b *blockDec) updateHistory(hist *history) error { - if len(b.data) > maxCompressedBlockSize { - return fmt.Errorf("compressed block size too large (%d)", len(b.data)) - } - // Set output and release references. - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - - if b.Last { - // if last block we don't care about history. - println("Last block, no history returned") - hist.b = hist.b[:0] - return nil - } else { - hist.append(b.dst) - if debugDecoder { - println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) - } - } - hist.decoders.out, hist.decoders.literals = nil, nil - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go deleted file mode 100644 index 2cfe925ade..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ /dev/null @@ -1,889 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - - "github.com/klauspost/compress/huff0" -) - -type blockEnc struct { - size int - literals []byte - sequences []seq - coders seqCoders - litEnc *huff0.Scratch - dictLitEnc *huff0.Scratch - wr bitWriter - - extraLits int - output []byte - recentOffsets [3]uint32 - prevRecentOffsets [3]uint32 - - last bool - lowMem bool -} - -// init should be used once the block has been created. 
-// If called more than once, the effect is the same as calling reset. -func (b *blockEnc) init() { - if b.lowMem { - // 1K literals - if cap(b.literals) < 1<<10 { - b.literals = make([]byte, 0, 1<<10) - } - const defSeqs = 20 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - // 1K - if cap(b.output) < 1<<10 { - b.output = make([]byte, 0, 1<<10) - } - } else { - if cap(b.literals) < maxCompressedBlockSize { - b.literals = make([]byte, 0, maxCompressedBlockSize) - } - const defSeqs = 2000 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) - } - } - - if b.coders.mlEnc == nil { - b.coders.mlEnc = &fseEncoder{} - b.coders.mlPrev = &fseEncoder{} - b.coders.ofEnc = &fseEncoder{} - b.coders.ofPrev = &fseEncoder{} - b.coders.llEnc = &fseEncoder{} - b.coders.llPrev = &fseEncoder{} - } - b.litEnc = &huff0.Scratch{WantLogLess: 4} - b.reset(nil) -} - -// initNewEncode can be used to reset offsets and encoders to the initial state. -func (b *blockEnc) initNewEncode() { - b.recentOffsets = [3]uint32{1, 4, 8} - b.litEnc.Reuse = huff0.ReusePolicyNone - b.coders.setPrev(nil, nil, nil) -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) reset(prev *blockEnc) { - b.extraLits = 0 - b.literals = b.literals[:0] - b.size = 0 - b.sequences = b.sequences[:0] - b.output = b.output[:0] - b.last = false - if prev != nil { - b.recentOffsets = prev.prevRecentOffsets - } - b.dictLitEnc = nil -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) swapEncoders(prev *blockEnc) { - b.coders.swap(&prev.coders) - b.litEnc, prev.litEnc = prev.litEnc, b.litEnc -} - -// blockHeader contains the information for a block header. -type blockHeader uint32 - -// setLast sets the 'last' indicator on a block. -func (h *blockHeader) setLast(b bool) { - if b { - *h = *h | 1 - } else { - const mask = (1 << 24) - 2 - *h = *h & mask - } -} - -// setSize will store the compressed size of a block. -func (h *blockHeader) setSize(v uint32) { - const mask = 7 - *h = (*h)&mask | blockHeader(v<<3) -} - -// setType sets the block type. -func (h *blockHeader) setType(t blockType) { - const mask = 1 | (((1 << 24) - 1) ^ 7) - *h = (*h & mask) | blockHeader(t<<1) -} - -// appendTo will append the block header to a slice. -func (h blockHeader) appendTo(b []byte) []byte { - return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) -} - -// String returns a string representation of the block. -func (h blockHeader) String() string { - return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) -} - -// literalsHeader contains literals header information. -type literalsHeader uint64 - -// setType can be used to set the type of literal block. -func (h *literalsHeader) setType(t literalsBlockType) { - const mask = math.MaxUint64 - 3 - *h = (*h & mask) | literalsHeader(t) -} - -// setSize can be used to set a single size, for uncompressed and RLE content. 
-func (h *literalsHeader) setSize(regenLen int) { - inBits := bits.Len32(uint32(regenLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case inBits < 5: - lh |= (uint64(regenLen) << 3) | (1 << 60) - if debugEncoder { - got := int(lh>>3) & 0xff - if got != regenLen { - panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) - } - } - case inBits < 12: - lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) - case inBits < 20: - lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) - default: - panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) - } - *h = literalsHeader(lh) -} - -// setSizes will set the size of a compressed literals section and the input length. -func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { - compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case compBits <= 10 && inBits <= 10: - if !single { - lh |= 1 << 2 - } - lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debugEncoder { - const mmask = (1 << 24) - 1 - n := (lh >> 4) & mmask - if int(n&1023) != inLen { - panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) - } - if int(n>>10) != compLen { - panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) - } - } - case compBits <= 14 && inBits <= 14: - lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - case compBits <= 18 && inBits <= 18: - lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - default: - panic("internal error: block too big") - } - *h = literalsHeader(lh) -} - -// appendTo will append the literals header to a byte slice. -func (h literalsHeader) appendTo(b []byte) []byte { - size := uint8(h >> 60) - switch size { - case 1: - b = append(b, uint8(h)) - case 2: - b = append(b, uint8(h), uint8(h>>8)) - case 3: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) - case 4: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) - case 5: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) - default: - panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) - } - return b -} - -// size returns the output size with currently set values. -func (h literalsHeader) size() int { - return int(h >> 60) -} - -func (h literalsHeader) String() string { - return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) pushOffsets() { - b.prevRecentOffsets = b.recentOffsets -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) popOffsets() { - b.recentOffsets = b.prevRecentOffsets -} - -// matchOffset will adjust recent offsets and return the adjusted one, -// if it matches a previous offset. -func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
- if true { - if lits > 0 { - switch offset { - case b.recentOffsets[0]: - offset = 1 - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } else { - switch offset { - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 1 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[0] - 1: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } - } else { - offset += 3 - } - return offset -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRaw(a []byte) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(a))) - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output[:0]) - b.output = append(b.output, a...) - if debugEncoder { - println("Adding RAW block, length", len(a), "last:", b.last) - } -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(src))) - bh.setType(blockTypeRaw) - dst = bh.appendTo(dst) - dst = append(dst, src...) - if debugEncoder { - println("Adding RAW block, length", len(src), "last:", b.last) - } - return dst -} - -// encodeLits can be used if the block is only litLen. -func (b *blockEnc) encodeLits(lits []byte, raw bool) error { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(lits))) - - // Don't compress extremely small blocks - if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - } - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(lits) >= 1024 { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 16 { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(lits, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - if err == nil && len(out)+5 > len(lits) { - // If we are close, we may still be worse or equal to raw. - var lh literalsHeader - lh.setSizes(len(out), len(lits), single) - if len(out)+lh.size() >= len(lits) { - err = huff0.ErrIncompressible - } - } - switch err { - case huff0.ErrIncompressible: - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) 
- return nil - case huff0.ErrUseRLE: - if debugEncoder { - println("Adding RLE block, length", len(lits)) - } - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits[0]) - return nil - case nil: - default: - return err - } - // Compressed... - // Now, allow reuse - b.litEnc.Reuse = huff0.ReusePolicyAllow - bh.setType(blockTypeCompressed) - var lh literalsHeader - if reUsed { - if debugEncoder { - println("Reused tree, compressed to", len(out)) - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - } - // Set sizes - lh.setSizes(len(out), len(lits), single) - bh.setSize(uint32(len(out) + lh.size() + 1)) - - // Write block headers. - b.output = bh.appendTo(b.output) - b.output = lh.appendTo(b.output) - // Add compressed data. - b.output = append(b.output, out...) - // No sequences. - b.output = append(b.output, 0) - return nil -} - -// fuzzFseEncoder can be used to fuzz the FSE encoder. -func fuzzFseEncoder(data []byte) int { - if len(data) > maxSequences || len(data) < 2 { - return 0 - } - enc := fseEncoder{} - hist := enc.Histogram() - maxSym := uint8(0) - for i, v := range data { - v = v & 63 - data[i] = v - hist[v]++ - if v > maxSym { - maxSym = v - } - } - if maxSym == 0 { - // All 0 - return 0 - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) - if cnt == len(data) { - // RLE - return 0 - } - enc.HistogramFinished(maxSym, cnt) - err := enc.normalizeCount(len(data)) - if err != nil { - return 0 - } - _, err = enc.writeCount(nil) - if err != nil { - panic(err) - } - return 1 -} - -// encode will encode the block and append the output in b.output. -// Previous offset codes must be pushed if more blocks are expected. -func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { - if len(b.sequences) == 0 { - return b.encodeLits(b.literals, rawAllLits) - } - // We want some difference to at least account for the headers. - saved := b.size - len(b.literals) - (b.size >> 6) - if saved < 16 { - if org == nil { - return errIncompressible - } - b.popOffsets() - return b.encodeLits(org, rawAllLits) - } - - var bh blockHeader - var lh literalsHeader - bh.setLast(b.last) - bh.setType(blockTypeCompressed) - // Store offset of the block header. Needed when we know the size. - bhOffset := len(b.output) - b.output = bh.appendTo(b.output) - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(b.literals) >= 1024 && !raw { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 16 && !raw { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - if err == nil && len(out)+5 > len(b.literals) { - // If we are close, we may still be worse or equal to raw. 
- var lh literalsHeader - lh.setSize(len(b.literals)) - szRaw := lh.size() - lh.setSizes(len(out), len(b.literals), single) - szComp := lh.size() - if len(out)+szComp >= len(b.literals)+szRaw { - err = huff0.ErrIncompressible - } - } - switch err { - case huff0.ErrIncompressible: - lh.setType(literalsBlockRaw) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals...) - if debugEncoder { - println("Adding literals RAW, length", len(b.literals)) - } - case huff0.ErrUseRLE: - lh.setType(literalsBlockRLE) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals[0]) - if debugEncoder { - println("Adding literals RLE") - } - case nil: - // Compressed litLen... - if reUsed { - if debugEncoder { - println("reused tree") - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("new tree, size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - if debugEncoder { - _, _, err := huff0.ReadTable(out, nil) - if err != nil { - panic(err) - } - } - } - lh.setSizes(len(out), len(b.literals), single) - if debugEncoder { - printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) - println("Adding literal header:", lh) - } - b.output = lh.appendTo(b.output) - b.output = append(b.output, out...) - b.litEnc.Reuse = huff0.ReusePolicyAllow - if debugEncoder { - println("Adding literals compressed") - } - default: - if debugEncoder { - println("Adding literals ERROR:", err) - } - return err - } - // Sequence compression - - // Write the number of sequences - switch { - case len(b.sequences) < 128: - b.output = append(b.output, uint8(len(b.sequences))) - case len(b.sequences) < 0x7f00: // TODO: this could be wrong - n := len(b.sequences) - b.output = append(b.output, 128+uint8(n>>8), uint8(n)) - default: - n := len(b.sequences) - 0x7f00 - b.output = append(b.output, 255, uint8(n), uint8(n>>8)) - } - if debugEncoder { - println("Encoding", len(b.sequences), "sequences") - } - b.genCodes() - llEnc := b.coders.llEnc - ofEnc := b.coders.ofEnc - mlEnc := b.coders.mlEnc - err = llEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = ofEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = mlEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - - // Choose the best compression mode for each type. - // Will evaluate the new vs predefined and previous. - chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { - // See if predefined/previous is better - hist := cur.count[:cur.symbolLen] - nSize := cur.approxSize(hist) + cur.maxHeaderSize() - predefSize := preDef.approxSize(hist) - prevSize := prev.approxSize(hist) - - // Add a small penalty for new encoders. - // Don't bother with extremely small (<2 byte gains). - nSize = nSize + (nSize+2*8*16)>>4 - switch { - case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debugEncoder { - println("Using predefined", predefSize>>3, "<=", nSize>>3) - } - return preDef, compModePredefined - case prevSize <= nSize: - if debugEncoder { - println("Using previous", prevSize>>3, "<=", nSize>>3) - } - return prev, compModeRepeat - default: - if debugEncoder { - println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") - println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) - } - return cur, compModeFSE - } - } - - // Write compression mode - var mode uint8 - if llEnc.useRLE { - mode |= uint8(compModeRLE) << 6 - llEnc.setRLE(b.sequences[0].llCode) - if debugEncoder { - println("llEnc.useRLE") - } - } else { - var m seqCompMode - llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) - mode |= uint8(m) << 6 - } - if ofEnc.useRLE { - mode |= uint8(compModeRLE) << 4 - ofEnc.setRLE(b.sequences[0].ofCode) - if debugEncoder { - println("ofEnc.useRLE") - } - } else { - var m seqCompMode - ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) - mode |= uint8(m) << 4 - } - - if mlEnc.useRLE { - mode |= uint8(compModeRLE) << 2 - mlEnc.setRLE(b.sequences[0].mlCode) - if debugEncoder { - println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) - } - } else { - var m seqCompMode - mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) - mode |= uint8(m) << 2 - } - b.output = append(b.output, mode) - if debugEncoder { - printf("Compression modes: 0b%b", mode) - } - b.output, err = llEnc.writeCount(b.output) - if err != nil { - return err - } - start := len(b.output) - b.output, err = ofEnc.writeCount(b.output) - if err != nil { - return err - } - if false { - println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) - for i, v := range ofEnc.norm[:ofEnc.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) - } - } - b.output, err = mlEnc.writeCount(b.output) - if err != nil { - return err - } - - // Maybe in block? - wr := &b.wr - wr.reset(b.output) - - var ll, of, ml cState - - // Current sequence - seq := len(b.sequences) - 1 - s := b.sequences[seq] - llEnc.setBits(llBitsTable[:]) - mlEnc.setBits(mlBitsTable[:]) - ofEnc.setBits(nil) - - llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] - - // We have 3 bounds checks here (and in the loop). - // Since we are iterating backwards it is kinda hard to avoid. - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - ll.init(wr, &llEnc.ct, llB) - of.init(wr, &ofEnc.ct, ofB) - wr.flush32() - ml.init(wr, &mlEnc.ct, mlB) - - // Each of these lookups also generates a bounds check. - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - if debugSequences { - println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) - } - seq-- - // Store sequences in reverse... - for seq >= 0 { - s = b.sequences[seq] - - ofB := ofTT[s.ofCode] - wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. - //of.encode(ofB) - nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 - dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) - wr.addBits16NC(of.state, uint8(nbBitsOut)) - of.state = of.stateTable[dstState] - - // Accumulate extra bits. 
- outBits := ofB.outBits & 31 - extraBits := uint64(s.offset & bitMask32[outBits]) - extraBitsN := outBits - - mlB := mlTT[s.mlCode] - //ml.encode(mlB) - nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16 - dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState) - wr.addBits16NC(ml.state, uint8(nbBitsOut)) - ml.state = ml.stateTable[dstState] - - outBits = mlB.outBits & 31 - extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits]) - extraBitsN += outBits - - llB := llTT[s.llCode] - //ll.encode(llB) - nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16 - dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState) - wr.addBits16NC(ll.state, uint8(nbBitsOut)) - ll.state = ll.stateTable[dstState] - - outBits = llB.outBits & 31 - extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits]) - extraBitsN += outBits - - wr.flush32() - wr.addBits64NC(extraBits, extraBitsN) - - if debugSequences { - println("Encoded seq", seq, s) - } - - seq-- - } - ml.flush(mlEnc.actualTableLog) - of.flush(ofEnc.actualTableLog) - ll.flush(llEnc.actualTableLog) - wr.close() - b.output = wr.out - - // Maybe even add a bigger margin. - if len(b.output)-3-bhOffset >= b.size { - // Discard and encode as raw block. - b.output = b.encodeRawTo(b.output[:bhOffset], org) - b.popOffsets() - b.litEnc.Reuse = huff0.ReusePolicyNone - return nil - } - - // Size is output minus block header. - bh.setSize(uint32(len(b.output)-bhOffset) - 3) - if debugEncoder { - println("Rewriting block header", bh) - } - _ = bh.appendTo(b.output[bhOffset:bhOffset]) - b.coders.setPrev(llEnc, mlEnc, ofEnc) - return nil -} - -var errIncompressible = errors.New("incompressible") - -func (b *blockEnc) genCodes() { - if len(b.sequences) == 0 { - // nothing to do - return - } - if len(b.sequences) > math.MaxUint16 { - panic("can only encode up to 64K sequences") - } - // No bounds checks after here: - llH := b.coders.llEnc.Histogram() - ofH := b.coders.ofEnc.Histogram() - mlH := b.coders.mlEnc.Histogram() - for i := range llH { - llH[i] = 0 - } - for i := range ofH { - ofH[i] = 0 - } - for i := range mlH { - mlH[i] = 0 - } - - var llMax, ofMax, mlMax uint8 - for i := range b.sequences { - seq := &b.sequences[i] - v := llCode(seq.litLen) - seq.llCode = v - llH[v]++ - if v > llMax { - llMax = v - } - - v = ofCode(seq.offset) - seq.ofCode = v - ofH[v]++ - if v > ofMax { - ofMax = v - } - - v = mlCode(seq.matchLen) - seq.mlCode = v - mlH[v]++ - if v > mlMax { - mlMax = v - if debugAsserts && mlMax > maxMatchLengthSymbol { - panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) - } - } - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - if debugAsserts && mlMax > maxMatchLengthSymbol { - panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) - } - if debugAsserts && ofMax > maxOffsetBits { - panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) - } - if debugAsserts && llMax > maxLiteralLengthSymbol { - panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) - } - - b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) - b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) - b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) -} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go deleted file mode 100644 index 01a01e486e..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. - -package zstd - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again.
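Aside on the sequence-encoding loop deleted above (before the generated stringer file): each sequence contributes FSE state bits plus "extra" bits for its offset, match length and literal length, which the loop accumulates into a single 64-bit word before one flush. A toy illustration of that packing; `bitMask32` is re-declared here only to keep the snippet self-contained.

```go
package main

import "fmt"

// bitMask32[n] has the low n bits set, like the encoder's lookup table.
var bitMask32 [32]uint32

func init() {
	for i := range bitMask32 {
		bitMask32[i] = 1<<uint(i) - 1
	}
}

func main() {
	var extra uint64 // packed extra bits, as in the loop above
	var n uint8      // total bit count handed to addBits64NC
	add := func(v uint32, bits uint8) {
		extra = extra<<bits | uint64(v&bitMask32[bits])
		n += bits
	}
	add(1234, 11) // offset extra bits
	add(5, 3)     // match-length extra bits
	add(2, 2)     // literal-length extra bits
	fmt.Printf("packed %d bits: %016b\n", n, extra)
}
```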
- var x [1]struct{} - _ = x[blockTypeRaw-0] - _ = x[blockTypeRLE-1] - _ = x[blockTypeCompressed-2] - _ = x[blockTypeReserved-3] -} - -const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" - -var _blockType_index = [...]uint8{0, 12, 24, 43, 60} - -func (i blockType) String() string { - if i >= blockType(len(_blockType_index)-1) { - return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[literalsBlockRaw-0] - _ = x[literalsBlockRLE-1] - _ = x[literalsBlockCompressed-2] - _ = x[literalsBlockTreeless-3] -} - -const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" - -var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} - -func (i literalsBlockType) String() string { - if i >= literalsBlockType(len(_literalsBlockType_index)-1) { - return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[compModePredefined-0] - _ = x[compModeRLE-1] - _ = x[compModeFSE-2] - _ = x[compModeRepeat-3] -} - -const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" - -var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} - -func (i seqCompMode) String() string { - if i >= seqCompMode(len(_seqCompMode_index)-1) { - return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[tableLiteralLengths-0] - _ = x[tableOffsets-1] - _ = x[tableMatchLengths-2] -} - -const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" - -var _tableIndex_index = [...]uint8{0, 19, 31, 48} - -func (i tableIndex) String() string { - if i >= tableIndex(len(_tableIndex_index)-1) { - return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go deleted file mode 100644 index 55a388553d..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "io" -) - -type byteBuffer interface { - // Read up to 8 bytes. - // Returns io.ErrUnexpectedEOF if this cannot be satisfied. - readSmall(n int) ([]byte, error) - - // Read >8 bytes. - // MAY use the destination slice. - readBig(n int, dst []byte) ([]byte, error) - - // Read a single byte. - readByte() (byte, error) - - // Skip n bytes. 
- skipN(n int64) error -} - -// in-memory buffer -type byteBuf []byte - -func (b *byteBuf) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readByte() (byte, error) { - bb := *b - if len(bb) < 1 { - return 0, io.ErrUnexpectedEOF - } - r := bb[0] - *b = bb[1:] - return r, nil -} - -func (b *byteBuf) skipN(n int64) error { - bb := *b - if n < 0 { - return fmt.Errorf("negative skip (%d) requested", n) - } - if int64(len(bb)) < n { - return io.ErrUnexpectedEOF - } - *b = bb[n:] - return nil -} - -// wrapper around a reader. -type readerWrapper struct { - r io.Reader - tmp [8]byte -} - -func (r *readerWrapper) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - n2, err := io.ReadFull(r.r, r.tmp[:n]) - // We only really care about the actual bytes read. - if err != nil { - if err == io.EOF { - return nil, io.ErrUnexpectedEOF - } - if debugDecoder { - println("readSmall: got", n2, "want", n, "err", err) - } - return nil, err - } - return r.tmp[:n], nil -} - -func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { - if cap(dst) < n { - dst = make([]byte, n) - } - n2, err := io.ReadFull(r.r, dst[:n]) - if err == io.EOF && n > 0 { - err = io.ErrUnexpectedEOF - } - return dst[:n2], err -} - -func (r *readerWrapper) readByte() (byte, error) { - n2, err := io.ReadFull(r.r, r.tmp[:1]) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, err - } - if n2 != 1 { - return 0, io.ErrUnexpectedEOF - } - return r.tmp[0], nil -} - -func (r *readerWrapper) skipN(n int64) error { - n2, err := io.CopyN(io.Discard, r.r, n) - if n2 != n { - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go deleted file mode 100644 index 0e59a242d8..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// overread returns whether we have advanced too far. -func (b *byteReader) overread() bool { - return b.off > len(b.b) -} - -// Int32 returns a little endian int32 starting at current offset. -func (b byteReader) Int32() int32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := int32(b2[3]) - v2 := int32(b2[2]) - v1 := int32(b2[1]) - v0 := int32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint8 returns the next byte -func (b *byteReader) Uint8() uint8 { - v := b.b[b.off] - return v -} - -// Uint32 returns a little endian uint32 starting at current offset. 
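Aside: `readerWrapper` above leans on a stdlib guarantee when translating short reads into `io.ErrUnexpectedEOF`: `io.ReadFull` returns `io.EOF` only when zero bytes were read, and `io.ErrUnexpectedEOF` on a partial read. A small demonstration:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	var tmp [8]byte
	r := bytes.NewReader([]byte{1, 2, 3})

	// Partial read: three bytes arrive, then the reader ends.
	_, err := io.ReadFull(r, tmp[:8])
	fmt.Println(err) // io.ErrUnexpectedEOF

	// Nothing left at all: plain io.EOF, which readSmall above
	// remaps to io.ErrUnexpectedEOF for its callers.
	_, err = io.ReadFull(r, tmp[:1])
	fmt.Println(err) // io.EOF
}
```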
-func (b byteReader) Uint32() uint32 { - if r := b.remain(); r < 4 { - // Very rare - v := uint32(0) - for i := 1; i <= r; i++ { - v = (v << 8) | uint32(b.b[len(b.b)-i]) - } - return v - } - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint32NC returns a little endian uint32 starting at current offset. -// The caller must be sure if there are at least 4 bytes left. -func (b byteReader) Uint32NC() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go deleted file mode 100644 index f6a240970d..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2020+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "errors" - "io" -) - -// HeaderMaxSize is the maximum size of a Frame and Block Header. -// If less is sent to Header.Decode it *may* still contain enough information. -const HeaderMaxSize = 14 + 3 - -// Header contains information about the first frame and block within that. -type Header struct { - // SingleSegment specifies whether the data is to be decompressed into a - // single contiguous memory segment. - // It implies that WindowSize is invalid and that FrameContentSize is valid. - SingleSegment bool - - // WindowSize is the window of data to keep while decoding. - // Will only be set if SingleSegment is false. - WindowSize uint64 - - // Dictionary ID. - // If 0, no dictionary. - DictionaryID uint32 - - // HasFCS specifies whether FrameContentSize has a valid value. - HasFCS bool - - // FrameContentSize is the expected uncompressed size of the entire frame. - FrameContentSize uint64 - - // Skippable will be true if the frame is meant to be skipped. - // This implies that FirstBlock.OK is false. - Skippable bool - - // SkippableID is the user-specific ID for the skippable frame. - // Valid values are between 0 to 15, inclusive. - SkippableID int - - // SkippableSize is the length of the user data to skip following - // the header. - SkippableSize uint32 - - // HeaderSize is the raw size of the frame header. - // - // For normal frames, it includes the size of the magic number and - // the size of the header (per section 3.1.1.1). - // It does not include the size for any data blocks (section 3.1.1.2) nor - // the size for the trailing content checksum. - // - // For skippable frames, this counts the size of the magic number - // along with the size of the size field of the payload. - // It does not include the size of the skippable payload itself. - // The total frame size is the HeaderSize plus the SkippableSize. - HeaderSize int - - // First block information. - FirstBlock struct { - // OK will be set if first block could be decoded. - OK bool - - // Is this the last block of a frame? - Last bool - - // Is the data compressed? - // If true CompressedSize will be populated. 
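Aside: the hand-rolled `Uint32` above is ordinary little-endian reassembly and matches `encoding/binary` for an in-bounds read; the manual slicing presumably just helps the compiler elide bounds checks. A quick equivalence check:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x78, 0x56, 0x34, 0x12}
	manual := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
	fmt.Printf("%#x %#x\n", manual, binary.LittleEndian.Uint32(b)) // 0x12345678 0x12345678
}
```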
- // Unfortunately DecompressedSize cannot be determined - // without decoding the blocks. - Compressed bool - - // DecompressedSize is the expected decompressed size of the block. - // Will be 0 if it cannot be determined. - DecompressedSize int - - // CompressedSize of the data in the block. - // Does not include the block header. - // Will be equal to DecompressedSize if not Compressed. - CompressedSize int - } - - // If set there is a checksum present for the block content. - // The checksum field at the end is always 4 bytes long. - HasCheckSum bool -} - -// Decode the header from the beginning of the stream. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. -func (h *Header) Decode(in []byte) error { - *h = Header{} - if len(in) < 4 { - return io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - b, in := in[:4], in[4:] - if string(b) != frameMagic { - if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch - } - if len(in) < 4 { - return io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - h.Skippable = true - h.SkippableID = int(b[0] & 0xf) - h.SkippableSize = binary.LittleEndian.Uint32(in) - return nil - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - if len(in) < 1 { - return io.ErrUnexpectedEOF - } - fhd, in := in[0], in[1:] - h.HeaderSize++ - h.SingleSegment = fhd&(1<<5) != 0 - h.HasCheckSum = fhd&(1<<2) != 0 - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - if !h.SingleSegment { - if len(in) < 1 { - return io.ErrUnexpectedEOF - } - var wd byte - wd, in = in[0], in[1:] - h.HeaderSize++ - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - h.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - if len(in) < int(size) { - return io.ErrUnexpectedEOF - } - b, in = in[:size], in[size:] - h.HeaderSize += int(size) - switch len(b) { - case 1: - h.DictionaryID = uint32(b[0]) - case 2: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if h.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - - if fcsSize > 0 { - h.HasFCS = true - if len(in) < fcsSize { - return io.ErrUnexpectedEOF - } - b, in = in[:fcsSize], in[fcsSize:] - h.HeaderSize += int(fcsSize) - switch len(b) { - case 1: - h.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. 
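Aside: the Window_Descriptor decoding above packs the window size into one byte: a 5-bit exponent offset by 10 and a 3-bit mantissa in eighths of the base. A small calculator following the same arithmetic:

```go
package main

import "fmt"

// windowSize mirrors the Window_Descriptor math in Header.Decode above.
func windowSize(wd byte) uint64 {
	windowLog := 10 + (wd >> 3)
	windowBase := uint64(1) << windowLog
	windowAdd := (windowBase / 8) * uint64(wd&0x7)
	return windowBase + windowAdd
}

func main() {
	fmt.Println(windowSize(0x00)) // 1024: the 1 KiB minimum
	fmt.Println(windowSize(0x01)) // 1152: 1 KiB plus one eighth
	fmt.Println(windowSize(0xFF)) // (1<<41)+7*(1<<38): the ~3.75 TB spec maximum
}
```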
- h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - } - - // Frame Header done, we will not fail from now on. - if len(in) < 3 { - return nil - } - tmp := in[:3] - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - h.FirstBlock.Last = bh&1 != 0 - blockType := blockType((bh >> 1) & 3) - // find size. - cSize := int(bh >> 3) - switch blockType { - case blockTypeReserved: - return nil - case blockTypeRLE: - h.FirstBlock.Compressed = true - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = 1 - case blockTypeCompressed: - h.FirstBlock.Compressed = true - h.FirstBlock.CompressedSize = cSize - case blockTypeRaw: - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = cSize - default: - panic("Invalid block type") - } - - h.FirstBlock.OK = true - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go deleted file mode 100644 index f04aaa21eb..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ /dev/null @@ -1,948 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "context" - "encoding/binary" - "io" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Decoder provides decoding of zstandard streams. -// The decoder has been designed to operate without allocations after a warmup. -// This means that you should store the decoder for best performance. -// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. -// A decoder can safely be re-used even if the previous stream failed. -// To release the resources, you must call the Close() function on a decoder. -type Decoder struct { - o decoderOptions - - // Unreferenced decoders, ready for use. - decoders chan *blockDec - - // Current read position used for Reader functionality. - current decoderState - - // sync stream decoding - syncStream struct { - decodedFrame uint64 - br readerWrapper - enabled bool - inFrame bool - dstBuf []byte - } - - frame *frameDec - - // Custom dictionaries. - dicts map[uint32]*dict - - // streamWg is the waitgroup for all streams - streamWg sync.WaitGroup -} - -// decoderState is used for maintaining state when the decoder -// is used for streaming. -type decoderState struct { - // current block being written to stream. - decodeOutput - - // output in order to be written to stream. - output chan decodeOutput - - // cancel remaining output. - cancel context.CancelFunc - - // crc of current frame - crc *xxhash.Digest - - flushed bool -} - -var ( - // Check the interfaces we want to support. - _ = io.WriterTo(&Decoder{}) - _ = io.Reader(&Decoder{}) -) - -// NewReader creates a new decoder. -// A nil Reader can be provided in which case Reset can be used to start a decode. -// -// A Decoder can be used in two modes: -// -// 1) As a stream, or -// 2) For stateless decoding using DecodeAll. 
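Aside: the `Decoder` documented above is designed to be created once and reused. A minimal streaming round trip against the package's public API (the encoder is included only to produce input, and error handling is trimmed for brevity):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Produce a zstd stream to decode.
	var buf bytes.Buffer
	enc, _ := zstd.NewWriter(&buf)
	enc.Write([]byte("hello zstd streaming"))
	enc.Close()

	// Store and reuse the decoder; Reset switches it to another stream.
	dec, err := zstd.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, _ := io.ReadAll(dec)
	fmt.Println(string(out))
}
```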
-// -// Only a single stream can be decoded concurrently, but the same decoder -// can run multiple concurrent stateless decodes. It is even possible to -// use stateless decodes while a stream is being decoded. -// -// The Reset function can be used to initiate a new stream, which will considerably -// reduce the allocations normally caused by NewReader. -func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { - initPredefined() - var d Decoder - d.o.setDefault() - for _, o := range opts { - err := o(&d.o) - if err != nil { - return nil, err - } - } - d.current.crc = xxhash.New() - d.current.flushed = true - - if r == nil { - d.current.err = ErrDecoderNilInput - } - - // Transfer option dicts. - d.dicts = make(map[uint32]*dict, len(d.o.dicts)) - for _, dc := range d.o.dicts { - d.dicts[dc.id] = dc - } - d.o.dicts = nil - - // Create decoders - d.decoders = make(chan *blockDec, d.o.concurrent) - for i := 0; i < d.o.concurrent; i++ { - dec := newBlockDec(d.o.lowMem) - dec.localFrame = newFrameDec(d.o) - d.decoders <- dec - } - - if r == nil { - return &d, nil - } - return &d, d.Reset(r) -} - -// Read bytes from the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. -// When the stream is done, io.EOF will be returned. -func (d *Decoder) Read(p []byte) (int, error) { - var n int - for { - if len(d.current.b) > 0 { - filled := copy(p, d.current.b) - p = p[filled:] - d.current.b = d.current.b[filled:] - n += filled - } - if len(p) == 0 { - break - } - if len(d.current.b) == 0 { - // We have an error and no more data - if d.current.err != nil { - break - } - if !d.nextBlock(n == 0) { - return n, d.current.err - } - } - } - if len(d.current.b) > 0 { - if debugDecoder { - println("returning", n, "still bytes left:", len(d.current.b)) - } - // Only return error at end of block - return n, nil - } - if d.current.err != nil { - d.drainOutput() - } - if debugDecoder { - println("returning", n, d.current.err, len(d.decoders)) - } - return n, d.current.err -} - -// Reset will reset the decoder with the supplied stream after the current has finished processing. -// Note that this functionality cannot be used after Close has been called. -// Reset can be called with a nil reader to release references to the previous reader. -// After being called with a nil reader, no other operations than Reset or DecodeAll or Close -// should be used. -func (d *Decoder) Reset(r io.Reader) error { - if d.current.err == ErrDecoderClosed { - return d.current.err - } - - d.drainOutput() - - d.syncStream.br.r = nil - if r == nil { - d.current.err = ErrDecoderNilInput - if len(d.current.b) > 0 { - d.current.b = d.current.b[:0] - } - d.current.flushed = true - return nil - } - - // If bytes buffer and < 5MB, do sync decoding anyway. - if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { - bb2 := bb - if debugDecoder { - println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) - } - b := bb2.Bytes() - var dst []byte - if cap(d.syncStream.dstBuf) > 0 { - dst = d.syncStream.dstBuf[:0] - } - - dst, err := d.DecodeAll(b, dst) - if err == nil { - err = io.EOF - } - // Save output buffer - d.syncStream.dstBuf = dst - d.current.b = dst - d.current.err = err - d.current.flushed = true - if debugDecoder { - println("sync decode to", len(dst), "bytes, err:", err) - } - return nil - } - // Remove current block.
- d.stashDecoder() - d.current.decodeOutput = decodeOutput{} - d.current.err = nil - d.current.flushed = false - d.current.d = nil - d.syncStream.dstBuf = nil - - // Ensure no-one else is still running... - d.streamWg.Wait() - if d.frame == nil { - d.frame = newFrameDec(d.o) - } - - if d.o.concurrent == 1 { - return d.startSyncDecoder(r) - } - - d.current.output = make(chan decodeOutput, d.o.concurrent) - ctx, cancel := context.WithCancel(context.Background()) - d.current.cancel = cancel - d.streamWg.Add(1) - go d.startStreamDecoder(ctx, r, d.current.output) - - return nil -} - -// drainOutput will drain the output until errEndOfStream is sent. -func (d *Decoder) drainOutput() { - if d.current.cancel != nil { - if debugDecoder { - println("cancelling current") - } - d.current.cancel() - d.current.cancel = nil - } - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) - } - d.decoders <- d.current.d - d.current.d = nil - d.current.b = nil - } - if d.current.output == nil || d.current.flushed { - println("current already flushed") - return - } - for v := range d.current.output { - if v.d != nil { - if debugDecoder { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - } - d.current.output = nil - d.current.flushed = true -} - -// WriteTo writes data to w until there's no more data to write or when an error occurs. -// The return value n is the number of bytes written. -// Any error encountered during the write is also returned. -func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - var n int64 - for { - if len(d.current.b) > 0 { - n2, err2 := w.Write(d.current.b) - n += int64(n2) - if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { - d.current.err = err2 - } else if n2 != len(d.current.b) { - d.current.err = io.ErrShortWrite - } - } - if d.current.err != nil { - break - } - d.nextBlock(true) - } - err := d.current.err - if err != nil { - d.drainOutput() - } - if err == io.EOF { - err = nil - } - return n, err -} - -// DecodeAll allows stateless decoding of a blob of bytes. -// Output will be appended to dst, so if the destination size is known -// you can pre-allocate the destination slice to avoid allocations. -// DecodeAll can be used concurrently. -// The Decoder concurrency limits will be respected. -func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.decoders == nil { - return dst, ErrDecoderClosed - } - - // Grab a block decoder and frame decoder. 
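Aside on `DecodeAll` above: it is the stateless path, safe for concurrent use on one stored decoder, and it appends to dst, so a pre-sized buffer avoids allocations. A sketch of the common "package-level decoder" pattern; the `decompress` helper name is illustrative:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// A single decoder, created once with a nil reader, serves all
// stateless DecodeAll calls, even from multiple goroutines.
var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))

func decompress(src []byte) ([]byte, error) {
	return decoder.DecodeAll(src, nil)
}

func main() {
	enc, _ := zstd.NewWriter(nil)
	blob := enc.EncodeAll([]byte("stateless round trip"), nil)
	enc.Close()

	out, err := decompress(blob)
	fmt.Println(string(out), err)
}
```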
- block := <-d.decoders - frame := block.localFrame - initialSize := len(dst) - defer func() { - if debugDecoder { - printf("re-adding decoder: %p", block) - } - frame.rawInput = nil - frame.bBuf = nil - if frame.history.decoders.br != nil { - frame.history.decoders.br.in = nil - } - d.decoders <- block - }() - frame.bBuf = input - - for { - frame.history.reset() - err := frame.reset(&frame.bBuf) - if err != nil { - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") - } - return dst, nil - } - return dst, err - } - if err = d.setDict(frame); err != nil { - return nil, err - } - if frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) - } - return dst, ErrWindowSizeExceeded - } - if frame.FrameContentSize != fcsUnknown { - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - - if cap(dst) == 0 && !d.o.limitToCap { - // Allocate len(input) * 2 by default if nothing is provided - // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } - if uint64(size) > d.o.maxDecodedSize { - size = int(d.o.maxDecodedSize) - } - dst = make([]byte, 0, size) - } - - dst, err = frame.runDecoder(dst, block) - if err != nil { - return dst, err - } - if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { - return dst, ErrDecoderSizeExceeded - } - if len(frame.bBuf) == 0 { - if debugDecoder { - println("frame dbuf empty") - } - break - } - } - return dst, nil -} - -// nextBlock returns the next block. -// If an error occurs d.err will be set. -// Optionally the function can block for new output. -// If non-blocking mode is used the returned boolean will be false -// if no data was available without blocking. -func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.err != nil { - // Keep error state. - return false - } - d.current.b = d.current.b[:0] - - // SYNC: - if d.syncStream.enabled { - if !blocking { - return false - } - ok = d.nextBlockSync() - if !ok { - d.stashDecoder() - } - return ok - } - - //ASYNC: - d.stashDecoder() - if blocking { - d.current.decodeOutput, ok = <-d.current.output - } else { - select { - case d.current.decodeOutput, ok = <-d.current.output: - default: - return false - } - } - if !ok { - // This should not happen, so signal error state... 
- d.current.err = io.ErrUnexpectedEOF - return false - } - next := d.current.decodeOutput - if next.d != nil && next.d.async.newHist != nil { - d.current.crc.Reset() - } - if debugDecoder { - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) - println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) - } - - if d.o.ignoreChecksum { - return true - } - - if len(next.b) > 0 { - d.current.crc.Write(next.b) - } - if next.err == nil && next.d != nil && next.d.hasCRC { - got := uint32(d.current.crc.Sum64()) - if got != next.d.checkCRC { - if debugDecoder { - printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) - } - d.current.err = ErrCRCMismatch - } else { - if debugDecoder { - printf("CRC ok %08x\n", got) - } - } - } - - return true -} - -func (d *Decoder) nextBlockSync() (ok bool) { - if d.current.d == nil { - d.current.d = <-d.decoders - } - for len(d.current.b) == 0 { - if !d.syncStream.inFrame { - d.frame.history.reset() - d.current.err = d.frame.reset(&d.syncStream.br) - if d.current.err == nil { - d.current.err = d.setDict(d.frame) - } - if d.current.err != nil { - return false - } - if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { - d.current.err = ErrDecoderSizeExceeded - return false - } - - d.syncStream.decodedFrame = 0 - d.syncStream.inFrame = true - } - d.current.err = d.frame.next(d.current.d) - if d.current.err != nil { - return false - } - d.frame.history.ensureBlock() - if debugDecoder { - println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) - } - histBefore := len(d.frame.history.b) - d.current.err = d.current.d.decodeBuf(&d.frame.history) - - if d.current.err != nil { - println("error after:", d.current.err) - return false - } - d.current.b = d.frame.history.b[histBefore:] - if debugDecoder { - println("history after:", len(d.frame.history.b)) - } - - // Check frame size (before CRC) - d.syncStream.decodedFrame += uint64(len(d.current.b)) - if d.syncStream.decodedFrame > d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeExceeded - return false - } - - // Check FCS - if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeMismatch - return false - } - - // Update/Check CRC - if d.frame.HasCheckSum { - if !d.o.ignoreChecksum { - d.frame.crc.Write(d.current.b) - } - if d.current.d.Last { - if !d.o.ignoreChecksum { - d.current.err = d.frame.checkCRC() - } else { - d.current.err = d.frame.consumeCRC() - } - if d.current.err != nil { - println("CRC error:", d.current.err) - return false - } - } - } - d.syncStream.inFrame = !d.current.d.Last - } - return true -} - -func (d *Decoder) stashDecoder() { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } -} - -// Close will release all resources. -// It is NOT possible to reuse the decoder after this. 
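Aside: the Close/IOReadCloser pair below lets a `Decoder` act as a plain `io.ReadCloser`, and `io.Copy` will pick up its `WriteTo` fast path. A short sketch (encoder included only to produce input):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, _ := zstd.NewWriter(&buf)
	enc.Write([]byte("wrapped as io.ReadCloser"))
	enc.Close()

	dec, _ := zstd.NewReader(&buf)
	rc := dec.IOReadCloser() // io.ReadCloser that also supports io.WriterTo
	defer rc.Close()         // closes the underlying decoder for good

	var out bytes.Buffer
	n, err := io.Copy(&out, rc) // io.Copy uses the WriteTo fast path
	fmt.Println(n, err, out.String())
}
```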
-func (d *Decoder) Close() { - if d.current.err == ErrDecoderClosed { - return - } - d.drainOutput() - if d.current.cancel != nil { - d.current.cancel() - d.streamWg.Wait() - d.current.cancel = nil - } - if d.decoders != nil { - close(d.decoders) - for dec := range d.decoders { - dec.Close() - } - d.decoders = nil - } - if d.current.d != nil { - d.current.d.Close() - d.current.d = nil - } - d.current.err = ErrDecoderClosed -} - -// IOReadCloser returns the decoder as an io.ReadCloser for convenience. -// Any changes to the decoder will be reflected, so the returned ReadCloser -// can be reused along with the decoder. -// io.WriterTo is also supported by the returned ReadCloser. -func (d *Decoder) IOReadCloser() io.ReadCloser { - return closeWrapper{d: d} -} - -// closeWrapper wraps a function call as a closer. -type closeWrapper struct { - d *Decoder -} - -// WriteTo forwards WriteTo calls to the decoder. -func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { - return c.d.WriteTo(w) -} - -// Read forwards read calls to the decoder. -func (c closeWrapper) Read(p []byte) (n int, err error) { - return c.d.Read(p) -} - -// Close closes the decoder. -func (c closeWrapper) Close() error { - c.d.Close() - return nil -} - -type decodeOutput struct { - d *blockDec - b []byte - err error -} - -func (d *Decoder) startSyncDecoder(r io.Reader) error { - d.frame.history.reset() - d.syncStream.br = readerWrapper{r: r} - d.syncStream.inFrame = false - d.syncStream.enabled = true - d.syncStream.decodedFrame = 0 - return nil -} - -// Create Decoder: -// ASYNC: -// Spawn 3 go routines. -// 0: Read frames and decode block literals. -// 1: Decode sequences. -// 2: Execute sequences, send to output. -func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { - defer d.streamWg.Done() - br := readerWrapper{r: r} - - var seqDecode = make(chan *blockDec, d.o.concurrent) - var seqExecute = make(chan *blockDec, d.o.concurrent) - - // Async 1: Decode sequences... - go func() { - var hist history - var hasErr bool - - for block := range seqDecode { - if hasErr { - if block != nil { - seqExecute <- block - } - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 1: new history, recent:", block.async.newHist.recentOffsets) - } - hist.reset() - hist.decoders = block.async.newHist.decoders - hist.recentOffsets = block.async.newHist.recentOffsets - hist.windowSize = block.async.newHist.windowSize - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqExecute <- block - continue - } - - hist.decoders.literals = block.async.literals - block.err = block.prepareSequences(block.async.seqData, &hist) - if debugDecoder && block.err != nil { - println("prepareSequences returned:", block.err) - } - hasErr = block.err != nil - if block.err == nil { - block.err = block.decodeSequences(&hist) - if debugDecoder && block.err != nil { - println("decodeSequences returned:", block.err) - } - hasErr = block.err != nil - // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] - block.async.seqSize = hist.decoders.seqSize - } - seqExecute <- block - } - close(seqExecute) - hist.reset() - }() - - var wg sync.WaitGroup - wg.Add(1) - - // Async 3: Execute sequences... 
- frameHistCache := d.frame.history.b - go func() { - var hist history - var decodedFrame uint64 - var fcs uint64 - var hasErr bool - for block := range seqExecute { - out := decodeOutput{err: block.err, d: block} - if block.err != nil || hasErr { - hasErr = true - output <- out - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 2: new history") - } - hist.reset() - hist.windowSize = block.async.newHist.windowSize - hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - - if cap(hist.b) < hist.allocFrameBuffer { - if cap(frameHistCache) >= hist.allocFrameBuffer { - hist.b = frameHistCache - } else { - hist.b = make([]byte, 0, hist.allocFrameBuffer) - println("Alloc history sized", hist.allocFrameBuffer) - } - } - hist.b = hist.b[:0] - fcs = block.async.fcs - decodedFrame = 0 - } - do := decodeOutput{err: block.err, d: block} - switch block.Type { - case blockTypeRLE: - if debugDecoder { - println("add rle block length:", block.RLESize) - } - - if cap(block.dst) < int(block.RLESize) { - if block.lowMem { - block.dst = make([]byte, block.RLESize) - } else { - block.dst = make([]byte, maxCompressedBlockSize) - } - } - block.dst = block.dst[:block.RLESize] - v := block.data[0] - for i := range block.dst { - block.dst[i] = v - } - hist.append(block.dst) - do.b = block.dst - case blockTypeRaw: - if debugDecoder { - println("add raw block length:", len(block.data)) - } - hist.append(block.data) - do.b = block.data - case blockTypeCompressed: - if debugDecoder { - println("execute with history length:", len(hist.b), "window:", hist.windowSize) - } - hist.decoders.seqSize = block.async.seqSize - hist.decoders.literals = block.async.literals - do.err = block.executeSequences(&hist) - hasErr = do.err != nil - if debugDecoder && hasErr { - println("executeSequences returned:", do.err) - } - do.b = block.dst - } - if !hasErr { - decodedFrame += uint64(len(do.b)) - if decodedFrame > fcs { - println("fcs exceeded", block.Last, fcs, decodedFrame) - do.err = ErrFrameSizeExceeded - hasErr = true - } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { - do.err = ErrFrameSizeMismatch - hasErr = true - } else { - if debugDecoder { - println("fcs ok", block.Last, fcs, decodedFrame) - } - } - } - output <- do - } - close(output) - frameHistCache = hist.b - wg.Done() - if debugDecoder { - println("decoder goroutines finished") - } - hist.reset() - }() - - var hist history -decodeStream: - for { - var hasErr bool - hist.reset() - decodeBlock := func(block *blockDec) { - if hasErr { - if block != nil { - seqDecode <- block - } - return - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqDecode <- block - return - } - - remain, err := block.decodeLiterals(block.data, &hist) - block.err = err - hasErr = block.err != nil - if err == nil { - block.async.literals = hist.decoders.literals - block.async.seqData = remain - } else if debugDecoder { - println("decodeLiterals error:", err) - } - seqDecode <- block - } - frame := d.frame - if debugDecoder { - println("New frame...") - } - var historySent bool - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) - } - if err == nil { - err = d.setDict(frame) - } - if err == nil && d.frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) - } - - err = ErrDecoderSizeExceeded - } - if err != nil { - select { - case <-ctx.Done(): - case dec := <-d.decoders: - dec.sendErr(err) - decodeBlock(dec) - } - break decodeStream - } - - // Go through all blocks of the frame. - for { - var dec *blockDec - select { - case <-ctx.Done(): - break decodeStream - case dec = <-d.decoders: - // Once we have a decoder, we MUST return it. - } - err := frame.next(dec) - if !historySent { - h := frame.history - if debugDecoder { - println("Alloc History:", h.allocFrameBuffer) - } - hist.reset() - if h.dict != nil { - hist.setDict(h.dict) - } - dec.async.newHist = &h - dec.async.fcs = frame.FrameContentSize - historySent = true - } else { - dec.async.newHist = nil - } - if debugDecoder && err != nil { - println("next block returned error:", err) - } - dec.err = err - dec.hasCRC = false - if dec.Last && frame.HasCheckSum && err == nil { - crc, err := frame.rawInput.readSmall(4) - if len(crc) < 4 { - if err == nil { - err = io.ErrUnexpectedEOF - - } - println("CRC missing?", err) - dec.err = err - } else { - dec.checkCRC = binary.LittleEndian.Uint32(crc) - dec.hasCRC = true - if debugDecoder { - printf("found crc to check: %08x\n", dec.checkCRC) - } - } - } - err = dec.err - last := dec.Last - decodeBlock(dec) - if err != nil { - break decodeStream - } - if last { - break - } - } - } - close(seqDecode) - wg.Wait() - hist.reset() - d.frame.history.b = frameHistCache -} - -func (d *Decoder) setDict(frame *frameDec) (err error) { - dict, ok := d.dicts[frame.DictionaryID] - if ok { - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(dict) - } else if frame.DictionaryID != 0 { - // A zero or missing dictionary id is ambiguous: - // either dictionary zero, or no dictionary. In particular, - // zstd --patch-from uses this id for the source file, - // so only return an error if the dictionary id is not zero. - err = ErrUnknownDictionary - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go deleted file mode 100644 index 774c5f00fe..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math/bits" - "runtime" -) - -// DOption is an option for creating a decoder. -type DOption func(*decoderOptions) error - -// options retains accumulated state of multiple options. -type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - maxWindowSize uint64 - dicts []*dict - ignoreChecksum bool - limitToCap bool - decodeBufsBelow int -} - -func (o *decoderOptions) setDefault() { - *o = decoderOptions{ - // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - maxWindowSize: MaxWindowSize, - decodeBufsBelow: 128 << 10, - } - if o.concurrent > 4 { - o.concurrent = 4 - } - o.maxDecodedSize = 64 << 30 -} - -// WithDecoderLowmem will set whether to use a lower amount of memory, -// but possibly have to allocate more while running. -func WithDecoderLowmem(b bool) DOption { - return func(o *decoderOptions) error { o.lowMem = b; return nil } -} - -// WithDecoderConcurrency sets the number of created decoders. 
-// When decoding block with DecodeAll, this will limit the number -// of possible concurrently running decodes. -// When decoding streams, this will limit the number of -// inflight blocks. -// When decoding streams and setting maximum to 1, -// no async decoding will be done. -// When a value of 0 is provided GOMAXPROCS will be used. -// By default this will be set to 4 or GOMAXPROCS, whatever is lower. -func WithDecoderConcurrency(n int) DOption { - return func(o *decoderOptions) error { - if n < 0 { - return errors.New("concurrency must be at least 1") - } - if n == 0 { - o.concurrent = runtime.GOMAXPROCS(0) - } else { - o.concurrent = n - } - return nil - } -} - -// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory -// non-streaming operations or maximum window size for streaming operations. -// This can be used to control memory usage of potentially hostile content. -// Maximum is 1 << 63 bytes. Default is 64GiB. -func WithDecoderMaxMemory(n uint64) DOption { - return func(o *decoderOptions) error { - if n == 0 { - return errors.New("WithDecoderMaxMemory must be at least 1") - } - if n > 1<<63 { - return errors.New("WithDecoderMaxmemory must be less than 1 << 63") - } - o.maxDecodedSize = n - return nil - } -} - -// WithDecoderDicts allows to register one or more dictionaries for the decoder. -// -// Each slice in dict must be in the [dictionary format] produced by -// "zstd --train" from the Zstandard reference implementation. -// -// If several dictionaries with the same ID are provided, the last one will be used. -// -// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format -func WithDecoderDicts(dicts ...[]byte) DOption { - return func(o *decoderOptions) error { - for _, b := range dicts { - d, err := loadDict(b) - if err != nil { - return err - } - o.dicts = append(o.dicts, d) - } - return nil - } -} - -// WithDecoderDictRaw registers a dictionary that may be used by the decoder. -// The slice content can be arbitrary data. -func WithDecoderDictRaw(id uint32, content []byte) DOption { - return func(o *decoderOptions) error { - if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { - return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) - } - o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) - return nil - } -} - -// WithDecoderMaxWindow allows to set a maximum window size for decodes. -// This allows rejecting packets that will cause big memory usage. -// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. -// If WithDecoderMaxMemory is set to a lower value, that will be used. -// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. -func WithDecoderMaxWindow(size uint64) DOption { - return func(o *decoderOptions) error { - if size < MinWindowSize { - return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") - } - if size > (1<<41)+7*(1<<38) { - return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") - } - o.maxWindowSize = size - return nil - } -} - -// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, -// or any size set in WithDecoderMaxMemory. -// This can be used to limit decoding to a specific maximum output size. -// Disabled by default. 
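Aside: the options above compose; a defensive configuration for untrusted input might cap decoded size and window and keep decoding synchronous. A sketch, with arbitrary example limits:

```go
package main

import "github.com/klauspost/compress/zstd"

func main() {
	dec, err := zstd.NewReader(nil,
		zstd.WithDecoderMaxMemory(64<<20), // refuse to decode more than 64 MiB
		zstd.WithDecoderMaxWindow(8<<20),  // reject frames demanding >8 MiB windows
		zstd.WithDecoderConcurrency(1),    // synchronous decoding, no goroutines
		zstd.WithDecoderLowmem(true),      // prefer smaller allocations
	)
	if err != nil {
		panic(err)
	}
	defer dec.Close()
}
```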
-func WithDecodeAllCapLimit(b bool) DOption { - return func(o *decoderOptions) error { - o.limitToCap = b - return nil - } -} - -// WithDecodeBuffersBelow will fully decode readers that have a -// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. -// This typically uses less allocations but will have the full decompressed object in memory. -// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. -// Default is 128KiB. -func WithDecodeBuffersBelow(size int) DOption { - return func(o *decoderOptions) error { - o.decodeBufsBelow = size - return nil - } -} - -// IgnoreChecksum allows to forcibly ignore checksum checking. -func IgnoreChecksum(b bool) DOption { - return func(o *decoderOptions) error { - o.ignoreChecksum = b - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go deleted file mode 100644 index 8d5567fe64..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ /dev/null @@ -1,534 +0,0 @@ -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "sort" - - "github.com/klauspost/compress/huff0" -) - -type dict struct { - id uint32 - - litEnc *huff0.Scratch - llDec, ofDec, mlDec sequenceDec - offsets [3]int - content []byte -} - -const dictMagic = "\x37\xa4\x30\xec" - -// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. -const dictMaxLength = 1 << 31 - -// ID returns the dictionary id or 0 if d is nil. -func (d *dict) ID() uint32 { - if d == nil { - return 0 - } - return d.id -} - -// ContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) ContentSize() int { - if d == nil { - return 0 - } - return len(d.content) -} - -// Content returns the dictionary content. -func (d *dict) Content() []byte { - if d == nil { - return nil - } - return d.content -} - -// Offsets returns the initial offsets. -func (d *dict) Offsets() [3]int { - if d == nil { - return [3]int{} - } - return d.offsets -} - -// LitEncoder returns the literal encoder. -func (d *dict) LitEncoder() *huff0.Scratch { - if d == nil { - return nil - } - return d.litEnc -} - -// Load a dictionary as described in -// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format -func loadDict(b []byte) (*dict, error) { - // Check static field size. 
- if len(b) <= 8+(3*4) { - return nil, io.ErrUnexpectedEOF - } - d := dict{ - llDec: sequenceDec{fse: &fseDecoder{}}, - ofDec: sequenceDec{fse: &fseDecoder{}}, - mlDec: sequenceDec{fse: &fseDecoder{}}, - } - if string(b[:4]) != dictMagic { - return nil, ErrMagicMismatch - } - d.id = binary.LittleEndian.Uint32(b[4:8]) - if d.id == 0 { - return nil, errors.New("dictionaries cannot have ID 0") - } - - // Read literal table - var err error - d.litEnc, b, err = huff0.ReadTable(b[8:], nil) - if err != nil { - return nil, fmt.Errorf("loading literal table: %w", err) - } - d.litEnc.Reuse = huff0.ReusePolicyMust - - br := byteReader{ - b: b, - off: 0, - } - readDec := func(i tableIndex, dec *fseDecoder) error { - if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { - return err - } - if br.overread() { - return io.ErrUnexpectedEOF - } - err = dec.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder || debugEncoder { - println("Read table ok", "symbolLen:", dec.symbolLen) - } - // Set decoders as predefined so they aren't reused. - dec.preDefined = true - return nil - } - - if err := readDec(tableOffsets, d.ofDec.fse); err != nil { - return nil, err - } - if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { - return nil, err - } - if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { - return nil, err - } - if br.remain() < 12 { - return nil, io.ErrUnexpectedEOF - } - - d.offsets[0] = int(br.Uint32()) - br.advance(4) - d.offsets[1] = int(br.Uint32()) - br.advance(4) - d.offsets[2] = int(br.Uint32()) - br.advance(4) - if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { - return nil, errors.New("invalid offset in dictionary") - } - d.content = make([]byte, br.remain()) - copy(d.content, br.unread()) - if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { - return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) - } - - return &d, nil -} - -// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. -func InspectDictionary(b []byte) (interface { - ID() uint32 - ContentSize() int - Content() []byte - Offsets() [3]int - LitEncoder() *huff0.Scratch -}, error) { - initPredefined() - d, err := loadDict(b) - return d, err -} - -type BuildDictOptions struct { - // Dictionary ID. - ID uint32 - - // Content to use to create dictionary tables. - Contents [][]byte - - // History to use for all blocks. - History []byte - - // Offsets to use. - Offsets [3]int - - // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. - // See https://github.com/facebook/zstd/issues/3724 - CompatV155 bool - - // Use the specified encoder level. - // The dictionary will be built using the specified encoder level, - // which will reflect speed and make the dictionary tailored for that level. - // If not set SpeedBestCompression will be used. - Level EncoderLevel - - // DebugOut will write stats and other details here if set. - DebugOut io.Writer -} - -func BuildDict(o BuildDictOptions) ([]byte, error) { - initPredefined() - hist := o.History - contents := o.Contents - debug := o.DebugOut != nil - println := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintln(o.DebugOut, args...) - } - } - printf := func(s string, args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintf(o.DebugOut, s, args...) 
- } - } - print := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprint(o.DebugOut, args...) - } - } - - if int64(len(hist)) > dictMaxLength { - return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) - } - if len(hist) < 8 { - return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) - } - if len(contents) == 0 { - return nil, errors.New("no content provided") - } - d := dict{ - id: o.ID, - litEnc: nil, - llDec: sequenceDec{}, - ofDec: sequenceDec{}, - mlDec: sequenceDec{}, - offsets: o.Offsets, - content: hist, - } - block := blockEnc{lowMem: false} - block.init() - enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) - if o.Level != 0 { - eOpts := encoderOptions{ - level: o.Level, - blockSize: maxMatchLen, - windowSize: maxMatchLen, - dict: &d, - lowMem: false, - } - enc = eOpts.encoder() - } else { - o.Level = SpeedBestCompression - } - var ( - remain [256]int - ll [256]int - ml [256]int - of [256]int - ) - addValues := func(dst *[256]int, src []byte) { - for _, v := range src { - dst[v]++ - } - } - addHist := func(dst *[256]int, src *[256]uint32) { - for i, v := range src { - dst[i] += int(v) - } - } - seqs := 0 - nUsed := 0 - litTotal := 0 - newOffsets := make(map[uint32]int, 1000) - for _, b := range contents { - block.reset(nil) - if len(b) < 8 { - continue - } - nUsed++ - enc.Reset(&d, true) - enc.Encode(&block, b) - addValues(&remain, block.literals) - litTotal += len(block.literals) - seqs += len(block.sequences) - block.genCodes() - addHist(&ll, block.coders.llEnc.Histogram()) - addHist(&ml, block.coders.mlEnc.Histogram()) - addHist(&of, block.coders.ofEnc.Histogram()) - for i, seq := range block.sequences { - if i > 3 { - break - } - offset := seq.offset - if offset == 0 { - continue - } - if offset > 3 { - newOffsets[offset-3]++ - } else { - newOffsets[uint32(o.Offsets[offset-1])]++ - } - } - } - // Find most used offsets. - var sortedOffsets []uint32 - for k := range newOffsets { - sortedOffsets = append(sortedOffsets, k) - } - sort.Slice(sortedOffsets, func(i, j int) bool { - a, b := sortedOffsets[i], sortedOffsets[j] - if a == b { - // Prefer the longer offset - return sortedOffsets[i] > sortedOffsets[j] - } - return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] - }) - if len(sortedOffsets) > 3 { - if debug { - print("Offsets:") - for i, v := range sortedOffsets { - if i > 20 { - break - } - printf("[%d: %d],", v, newOffsets[v]) - } - println("") - } - - sortedOffsets = sortedOffsets[:3] - } - for i, v := range sortedOffsets { - o.Offsets[i] = int(v) - } - if debug { - println("New repeat offsets", o.Offsets) - } - - if nUsed == 0 || seqs == 0 { - return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) - } - if debug { - println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) - } - if seqs/nUsed < 512 { - // Use 512 as minimum. 
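A hedged sketch of driving the exported BuildDict deleted above (not part of this diff); the ID and repeat offsets are illustrative assumptions, with {1, 4, 8} being zstd's conventional starting offsets. Assumes import "github.com/klauspost/compress/zstd".

func buildSampleDict(samples [][]byte, history []byte) ([]byte, error) {
	// BuildDict requires at least 8 bytes of history and at least one
	// sample; samples shorter than 8 bytes are skipped internally.
	return zstd.BuildDict(zstd.BuildDictOptions{
		ID:       0xE11D, // example only; loadDict rejects ID 0
		Contents: samples,
		History:  history,
		Offsets:  [3]int{1, 4, 8},
		Level:    zstd.SpeedBestCompression, // also the fallback when unset
	})
}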
- nUsed = seqs / 512 - } - copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { - hist := dst.Histogram() - var maxSym uint8 - var maxCount int - var fakeLength int - for i, v := range src { - if v > 0 { - v = v / nUsed - if v == 0 { - v = 1 - } - } - if v > maxCount { - maxCount = v - } - if v != 0 { - maxSym = uint8(i) - } - fakeLength += v - hist[i] = uint32(v) - } - dst.HistogramFinished(maxSym, maxCount) - dst.reUsed = false - dst.useRLE = false - err := dst.normalizeCount(fakeLength) - if err != nil { - return nil, err - } - if debug { - println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) - } - return dst.writeCount(nil) - } - if debug { - print("Literal lengths: ") - } - llTable, err := copyHist(block.coders.llEnc, &ll) - if err != nil { - return nil, err - } - if debug { - print("Match lengths: ") - } - mlTable, err := copyHist(block.coders.mlEnc, &ml) - if err != nil { - return nil, err - } - if debug { - print("Offsets: ") - } - ofTable, err := copyHist(block.coders.ofEnc, &of) - if err != nil { - return nil, err - } - - // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } - huffBuff := make([]byte, 0, avgSize) - // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } - if debug { - println("Huffman weights:") - } - for i, n := range remain[:] { - if n > 0 { - n = n / div - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - if debug { - printf("[%d: %d], ", i, n) - } - } - } - if o.CompatV155 && remain[255]/div == 0 { - huffBuff = append(huffBuff, 255) - } - scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { - scratch = &huff0.Scratch{TableLog: 11} - _, _, err = huff0.Compress1X(huffBuff, scratch) - if err == nil { - break - } - if debug { - printf("Try %d: Huffman error: %v\n", tries+1, err) - } - huffBuff = huffBuff[:0] - if tries == 250 { - if debug { - println("Huffman: Bailing out with predefined table") - } - - // Bail out.... Just generate something - huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) - for i := 0; i < 128; i++ { - huffBuff = append(huffBuff, byte(i)) - } - continue - } - if errors.Is(err, huff0.ErrIncompressible) { - // Try truncating least common. - for i, n := range remain[:] { - if n > 0 { - n = n / (div * (i + 1)) - if n > 0 { - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - } - } - } - if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { - huffBuff = append(huffBuff, 255) - } - if len(huffBuff) == 0 { - huffBuff = append(huffBuff, 0, 255) - } - } - if errors.Is(err, huff0.ErrUseRLE) { - for i, n := range remain[:] { - n = n / (div * (i + 1)) - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
- } - } - } - - var out bytes.Buffer - out.Write([]byte(dictMagic)) - out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) - out.Write(scratch.OutTable) - if debug { - println("huff table:", len(scratch.OutTable), "bytes") - println("of table:", len(ofTable), "bytes") - println("ml table:", len(mlTable), "bytes") - println("ll table:", len(llTable), "bytes") - } - out.Write(ofTable) - out.Write(mlTable) - out.Write(llTable) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) - out.Write(hist) - if debug { - _, err := loadDict(out.Bytes()) - if err != nil { - panic(err) - } - i, err := InspectDictionary(out.Bytes()) - if err != nil { - panic(err) - } - println("ID:", i.ID()) - println("Content size:", i.ContentSize()) - println("Encoder:", i.LitEncoder() != nil) - println("Offsets:", i.Offsets()) - var totalSize int - for _, b := range contents { - totalSize += len(b) - } - - encWith := func(opts ...EOption) int { - enc, err := NewWriter(nil, opts...) - if err != nil { - panic(err) - } - defer enc.Close() - var dst []byte - var totalSize int - for _, b := range contents { - dst = enc.EncodeAll(b, dst[:0]) - totalSize += len(dst) - } - return totalSize - } - plain := encWith(WithEncoderLevel(o.Level)) - withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) - println("Input size:", totalSize) - println("Plain Compressed:", plain) - println("Dict Compressed:", withDict) - println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") - } - return out.Bytes(), nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go deleted file mode 100644 index 5ca46038ad..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ /dev/null @@ -1,173 +0,0 @@ -package zstd - -import ( - "fmt" - "math/bits" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -const ( - dictShardBits = 6 -) - -type fastBase struct { - // cur is the offset at the start of hist - cur int32 - // maximum offset. Should be at least 2x block size. - maxMatchOff int32 - bufferReset int32 - hist []byte - crc *xxhash.Digest - tmp [8]byte - blk *blockEnc - lastDictID uint32 - lowMem bool -} - -// CRC returns the underlying CRC writer. -func (e *fastBase) CRC() *xxhash.Digest { - return e.crc -} - -// AppendCRC will append the CRC to the destination slice and return it. -func (e *fastBase) AppendCRC(dst []byte) []byte { - crc := e.crc.Sum(e.tmp[:0]) - dst = append(dst, crc[7], crc[6], crc[5], crc[4]) - return dst -} - -// WindowSize returns the window size of the encoder, -// or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int64) int32 { - if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } - return b - } - return e.maxMatchOff -} - -// Block returns the current block. 
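As assembled by BuildDict above, a serialized dictionary is: the 4-byte magic, a little-endian uint32 ID, the Huffman literal table, the offset/match-length/literal-length FSE tables, three little-endian repeat offsets, and the raw content. A sketch (hypothetical helper, assuming import "encoding/binary") of validating just the fixed-size header, mirroring the checks loadDict performs:

func dictHeader(b []byte) (id uint32, ok bool) {
	const dictMagic = "\x37\xa4\x30\xec"
	if len(b) < 8 || string(b[:4]) != dictMagic {
		return 0, false
	}
	// loadDict additionally rejects an ID of 0 before reading tables.
	return binary.LittleEndian.Uint32(b[4:8]), true
}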
-func (e *fastBase) Block() *blockEnc { - return e.blk -} - -func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > e.bufferReset { - panic(fmt.Sprintf("e.cur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) - } - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.ensureHist(len(src)) - } else { - if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { - panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) - } - // Move down - offset := int32(len(e.hist)) - e.maxMatchOff - copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:e.maxMatchOff] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -// ensureHist will ensure that history can keep at least this many bytes. -func (e *fastBase) ensureHist(n int) { - if cap(e.hist) >= n { - return - } - l := e.maxMatchOff - if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { - l += maxCompressedBlockSize - } else { - l += e.maxMatchOff - } - // Make it at least 1MB. - if l < 1<<20 && !e.lowMem { - l = 1 << 20 - } - // Make it at least the requested size. - if l < int32(n) { - l = int32(n) - } - e.hist = make([]byte, 0, l) -} - -// UseBlock will replace the block with the provided one, -// but transfer recent offsets from the previous. -func (e *fastBase) UseBlock(enc *blockEnc) { - enc.reset(e.blk) - e.blk = enc -} - -func (e *fastBase) matchlen(s, t int32, src []byte) int32 { - if debugAsserts { - if s < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if t < 0 { - err := fmt.Sprintf("t (%d) < 0", t) - panic(err) - } - if s-t > e.maxMatchOff { - err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) - panic(err) - } - if len(src)-int(s) > maxCompressedBlockSize { - panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) - } - } - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastBase) resetBase(d *dict, singleBlock bool) { - if e.blk == nil { - e.blk = &blockEnc{lowMem: e.lowMem} - e.blk.init() - } else { - e.blk.reset(nil) - } - e.blk.initNewEncode() - if e.crc == nil { - e.crc = xxhash.New() - } else { - e.crc.Reset() - } - e.blk.dictLitEnc = nil - if d != nil { - low := e.lowMem - if singleBlock { - e.lowMem = true - } - e.ensureHist(d.ContentSize() + maxCompressedBlockSize) - e.lowMem = low - } - - // We offset current position so everything will be out of reach. - // If above reset line, history will be purged. - if e.cur < e.bufferReset { - e.cur += e.maxMatchOff + int32(len(e.hist)) - } - e.hist = e.hist[:0] - if d != nil { - // Set offsets (currently not used) - for i, off := range d.offsets { - e.blk.recentOffsets[i] = uint32(off) - e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] - } - // Transfer litenc. - e.blk.dictLitEnc = d.litEnc - e.hist = append(e.hist, d.content...) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go deleted file mode 100644 index 858f8f43a5..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file.
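AppendCRC above emits bytes 7..4 of the big-endian xxhash64 sum, i.e. the low 32 bits of the digest in little-endian order, which is how the zstd frame checksum is defined. The equivalent transform as a standalone sketch (hypothetical helper, assuming import "encoding/binary"):

func frameChecksum(digest uint64) [4]byte {
	var out [4]byte
	// Identical to appending crc[7], crc[6], crc[5], crc[4].
	binary.LittleEndian.PutUint32(out[:], uint32(digest))
	return out
}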
-// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "fmt" - - "github.com/klauspost/compress" -) - -const ( - bestLongTableBits = 22 // Bits used in the long match table - bestLongTableSize = 1 << bestLongTableBits // Size of the table - bestLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - bestShortTableBits = 18 // Bits used in the short match table - bestShortTableSize = 1 << bestShortTableBits // Size of the table - bestShortLen = 4 // Bytes used for table hash - -) - -type match struct { - offset int32 - s int32 - length int32 - rep int32 - est int32 -} - -const highScore = maxMatchLen * 8 - -// estBits will estimate output bits from predefined tables. -func (m *match) estBits(bitsPerByte int32) { - mlc := mlCode(uint32(m.length - zstdMinMatch)) - var ofc uint8 - if m.rep < 0 { - ofc = ofCode(uint32(m.s-m.offset) + 3) - } else { - ofc = ofCode(uint32(m.rep)) - } - // Cost, excluding literals. - ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] - - // Add cost of match encoding... - m.est = int32(ofTT.outBits + mlTT.outBits) - m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) - // Subtract savings compared to literal encoding... - m.est -= (m.length * bitsPerByte) >> 10 - if m.est > 0 { - // Unlikely gain.. - m.length = 0 - m.est = highScore - } -} - -// bestFastEncoder uses 2 tables, one for short matches (4 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type bestFastEncoder struct { - fastBase - table [bestShortTableSize]prevEntry - longTable [bestLongTableSize]prevEntry - dictTable []prevEntry - dictLongTable []prevEntry -} - -// Encode improves compression... -func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (4) - inputMargin = 8 + 4 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [bestShortTableSize]prevEntry{} - e.longTable = [bestLongTableSize]prevEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away.
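The two-deep hash chain described in the bestFastEncoder comment above reduces to a single-slot update; a self-contained sketch, with prevEntry re-declared so it compiles on its own:

type prevEntry struct{ offset, prev int32 }

// chainInsert pushes a new head for hash h; the old head survives as
// .prev, so lookups can probe the two most recent positions that
// hashed to h.
func chainInsert(tbl []prevEntry, h uint32, off int32) {
	tbl[h] = prevEntry{offset: off, prev: tbl[h].offset}
}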
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - v2 := e.table[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.table[i] = prevEntry{ - offset: v, - prev: v2, - } - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Use this to estimate literal cost. - // Scaled by 10 bits. - bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) - // Huffman can never go < 1 bit/byte - if bitsPerByte < 1024 { - bitsPerByte = 1024 - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - const kSearchStrength = 10 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - offset3 := int32(blk.recentOffsets[2]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - const goodEnough = 250 - - cv := load6432(src, s) - - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - // Set m to a match at offset if it looks like that will improve compression. - improve := func(m *match, offset int32, s int32, first uint32, rep int32) { - delta := s - offset - if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { - return - } - if debugAsserts { - if offset >= s { - panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) - } - if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { - panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } - // Try to quick reject if we already have a long match. - if m.length > 16 { - left := len(src) - int(m.s+m.length) - // If we are too close to the end, keep as is. - if left <= 0 { - return - } - checkLen := m.length - (s - m.s) - 8 - if left > 2 && checkLen > 4 { - // Check 4 bytes, 4 bytes from the end of the current match. - a := load3232(src, offset+checkLen) - b := load3232(src, s+checkLen) - if a != b { - return - } - } - } - l := 4 + e.matchlen(s+4, offset+4, src) - if rep < 0 { - // Extend candidate match backwards as far as possible. 
- tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { - s-- - offset-- - l++ - } - } - - cand := match{offset: offset, s: s, length: l, rep: rep} - cand.estBits(bitsPerByte) - if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { - *m = cand - } - } - - best := match{s: s, est: highScore} - improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) - improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) - improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) - improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) - - if canRepeat && best.length < goodEnough { - if s == nextEmit { - // Check repeats straight after a match. - improve(&best, s-offset2, s, uint32(cv), 1|4) - improve(&best, s-offset3, s, uint32(cv), 2|4) - if offset1 > 1 { - improve(&best, s-(offset1-1), s, uint32(cv), 3|4) - } - } - - // If either no match or a non-repeat match, check at + 1 - if best.rep <= 0 { - cv32 := uint32(cv >> 8) - spp := s + 1 - improve(&best, spp-offset1, spp, cv32, 1) - improve(&best, spp-offset2, spp, cv32, 2) - improve(&best, spp-offset3, spp, cv32, 3) - if best.rep < 0 { - cv32 = uint32(cv >> 24) - spp += 2 - improve(&best, spp-offset1, spp, cv32, 1) - improve(&best, spp-offset2, spp, cv32, 2) - improve(&best, spp-offset3, spp, cv32, 3) - } - } - } - // Load next and check... - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} - - // Look far ahead, unless we have a really long match already... - if best.length < goodEnough { - // No match found, move forward on input, no need to check forward... - if best.length < 4 { - s += 1 + (s-nextEmit)>>(kSearchStrength-1) - if s >= sLimit { - break encodeLoop - } - continue - } - - candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s+1) - cv2 := load6432(src, s+2) - candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] - candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] - - // Short at s+1 - improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) - // Long at s+1, s+2 - improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) - improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) - improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) - improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) - if false { - // Short at s+3. - // Too often worse... - improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) - } - - // Start check at a fixed offset to allow for a few mismatches. - // For this compression level 2 yields the best results. - // We cannot do this if we have already indexed this position. - const skipBeginning = 2 - if best.s > s-skipBeginning { - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. 
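The rep argument threaded through improve() above separates repeat matches (codes 1-3, with bit 4 flagging a repeat checked straight after a match) from fresh ones; when a sequence is emitted, the repeat code itself is stored, while a new match at distance d is stored as d+3. A sketch of that mapping (hypothetical helper):

func seqOffsetCode(rep, dist int32) uint32 {
	if rep > 0 {
		return uint32(rep & 3) // recent-offset slot; the bit-4 flag is masked off
	}
	return uint32(dist) + 3 // literal distance, biased past the repeat codes
}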
- if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - - if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { - improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { - improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - } - } - } - } - } - - if debugAsserts { - if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { - panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) - } - } - - // We have a match, we can store the forward value - if best.rep > 0 { - var seq seq - seq.matchLen = uint32(best.length - zstdMinMatch) - if debugAsserts && s < nextEmit { - panic("s < nextEmit") - } - addLiterals(&seq, best.s) - - // Repeat. If bit 4 is set, this is a non-lit repeat. - seq.offset = uint32(best.rep & 3) - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index old s + 1 -> s - 1 - index0 := s + 1 - s = best.s + best.length - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - } - break encodeLoop - } - // Index skipped... - off := index0 + e.cur - for index0 < s { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - off++ - index0++ - } - switch best.rep { - case 2, 4 | 1: - offset1, offset2 = offset2, offset1 - case 3, 4 | 2: - offset1, offset2, offset3 = offset3, offset1, offset2 - case 4 | 3: - offset1, offset2, offset3 = offset1-1, offset1, offset2 - } - continue - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - index0 := s + 1 - s = best.s - t := best.offset - offset1, offset2, offset3 = s-t, offset1, offset2 - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && int(offset1) > len(src) { - panic("invalid offset") - } - - // Write our sequence - var seq seq - l := best.length - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index old s + 1 -> s - 1 - for index0 < s { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - index0++ - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - blk.recentOffsets[2] = uint32(offset3) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Reset will reset and set a dictionary if not nil -func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]prevEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = bestShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 - e.dictTable[nextHash] = prevEntry{ - prev: e.dictTable[nextHash].offset, - offset: i, - } - e.dictTable[nextHash1] = prevEntry{ - prev: e.dictTable[nextHash1].offset, - offset: i + 1, - } - e.dictTable[nextHash2] = prevEntry{ - prev: e.dictTable[nextHash2].offset, - offset: i + 2, - } - e.dictTable[nextHash3] = prevEntry{ - prev: e.dictTable[nextHash3].offset, - offset: i + 3, - } - } - e.lastDictID = d.id - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - } - // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) - - e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go deleted file mode 100644 index 8582f31a7c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ /dev/null @@ -1,1242 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import "fmt" - -const ( - betterLongTableBits = 19 // Bits used in the long match table - betterLongTableSize = 1 << betterLongTableBits // Size of the table - betterLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - betterShortTableBits = 13 // Bits used in the short match table - betterShortTableSize = 1 << betterShortTableBits // Size of the table - betterShortLen = 5 // Bytes used for table hash - - betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table - betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard - - betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table - betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard -) - -type prevEntry struct { - offset int32 - prev int32 -} - -// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type betterFastEncoder struct { - fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry -} - -type betterFastEncoderDict struct { - betterFastEncoder - dictTable []tableEntry - dictLongTable []prevEntry - shortTableShardDirty [betterShortTableShardCnt]bool - longTableShardDirty [betterLongTableShardCnt]bool - allDirty bool -} - -// Encode improves compression... -func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [betterShortTableSize]tableEntry{} - e.longTable = [betterLongTableSize]prevEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. 
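The stepSize and kSearchStrength constants defined just below combine in the main loop's advance rule, s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)); a standalone sketch of the same arithmetic with this file's values:

// nextSearchPos widens the skip the longer no match has been found:
// at kSearchStrength 9 the step grows by one byte per 256 literals
// emitted since the last match.
func nextSearchPos(s, nextEmit int32) int32 {
	const stepSize = 1
	const kSearchStrength = 9
	return s + stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
}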
- const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += length + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment.
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - index0 := s + repOff2 - s += length + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long...
- coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is around 3 bytes, but depends on input. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 3 - - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - s2 := s + skipBeginning - cv := load3232(src, s2) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Encode improves compression... -func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - e.allDirty = true - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.allDirty = true - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += length + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - // Index skipped...
- for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - index0 := s + repOff2 - s += length + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes.
- matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. 
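Each table write in this dictionary-aware encoder is paired with a markShortShardDirty or markLongShardDirty call; Reset, further below, then restores only the touched shards from the dictionary snapshot, falling back to a wholesale copy once more than 4/6 of the shards are dirty. A generic sketch of that restore policy (the type parameter stands in for tableEntry/prevEntry):

func restoreShards[T any](table, snapshot []T, dirty []bool, allDirty bool) {
	shardSize := len(table) / len(dirty)
	dirtyCnt := 0
	for _, d := range dirty {
		if d {
			dirtyCnt++
		}
	}
	if allDirty || dirtyCnt > len(dirty)*4/6 {
		// Most shards touched: cheaper to restore wholesale.
		copy(table, snapshot)
		for i := range dirty {
			dirty[i] = false
		}
		return
	}
	for i, d := range dirty {
		if !d {
			continue
		}
		copy(table[i*shardSize:(i+1)*shardSize], snapshot[i*shardSize:(i+1)*shardSize])
		dirty[i] = false
	}
}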
- l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("betterFastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = betterShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - e.dictTable[nextHash3] = tableEntry{ - val: uint32(cv >> 24), - offset: i + 3, - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Reset table to initial state - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterShortTableShardCnt - const shardSize = betterShortTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) - for i := range e.shortTableShardDirty { - e.shortTableShardDirty[i] = false - } - } else { - for i := range e.shortTableShardDirty { - if !e.shortTableShardDirty[i] { - continue - } - - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - e.shortTableShardDirty[i] = false - } - } - } - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterLongTableShardCnt - const shardSize = betterLongTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.longTable[:], e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - } else { - 
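The dictionary-table initialization above reads 8 bytes once and hashes four adjacent positions out of each load. A reduced sketch of that fill pattern, with a made-up 8-bit table and a simple multiplicative hash standing in for `hashLen`:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	tableBits = 8 // illustrative; the vendored tables use 15-19 bits
	tableSize = 1 << tableBits
)

type entry struct {
	val    uint32
	offset int32
}

// hash4 is a stand-in for hashLen: a multiplicative hash of the low
// 4 bytes of v, reduced to tableBits bits.
func hash4(v uint64) uint32 {
	const prime4 = 2654435761
	return (uint32(v) * prime4) >> (32 - tableBits)
}

// fillDictTable indexes every position of content, loading 8 bytes
// once and deriving the next three positions by shifting, as the
// Reset code above does.
func fillDictTable(content []byte) []entry {
	table := make([]entry, tableSize)
	for i := 0; i+8 <= len(content); i += 4 {
		cv := binary.LittleEndian.Uint64(content[i:])
		for j := int32(0); j < 4; j++ {
			table[hash4(cv>>(8*j))] = entry{val: uint32(cv >> (8 * j)), offset: int32(i) + j}
		}
	}
	return table
}

func main() {
	t := fillDictTable([]byte("hello zstd dictionary"))
	// Entry stored for "zstd" (offset 6), assuming no later collision.
	fmt.Println(t[hash4(uint64(binary.LittleEndian.Uint32([]byte("zstd"))))])
}
```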
for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) - e.longTableShardDirty[i] = false - } - } - } - e.cur = e.maxMatchOff - e.allDirty = false -} - -func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/betterLongTableShardSize] = true -} - -func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { - e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go deleted file mode 100644 index a154c18f74..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ /dev/null @@ -1,1123 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - dFastLongTableBits = 17 // Bits used in the long match table - dFastLongTableSize = 1 << dFastLongTableBits // Size of the table - dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastLongLen = 8 // Bytes used for table hash - - dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard - - dFastShortTableBits = tableBits // Bits used in the short match table - dFastShortTableSize = 1 << dFastShortTableBits // Size of the table - dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastShortLen = 5 // Bytes used for table hash - -) - -type doubleFastEncoder struct { - fastEncoder - longTable [dFastLongTableSize]tableEntry -} - -type doubleFastEncoderDict struct { - fastEncoderDict - longTable [dFastLongTableSize]tableEntry - dictLongTable []tableEntry - longTableShardDirty [dLongTableShardCnt]bool -} - -// Encode mimmics functionality in zstd_dfast.c -func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [dFastShortTableSize]tableEntry{} - e.longTable = [dFastLongTableSize]tableEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. 
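Each Encode method in this file opens with the same wraparound guard: when the cursor `e.cur` nears `e.bufferReset`, every stored offset is rebased so the cursor can restart at `e.maxMatchOff`, and entries that fall behind the match window are zeroed. A standalone sketch of that rebasing, with illustrative sizes:

```go
package main

import "fmt"

// rebase shifts all table offsets down so the running cursor can be
// reset to maxMatchOff without creating false matches. Entries older
// than the window (below minOff) are cleared.
func rebase(table []int32, cur, histLen, maxMatchOff int32) int32 {
	minOff := cur + histLen - maxMatchOff
	for i, v := range table {
		if v < minOff {
			table[i] = 0 // too far back to ever match again
		} else {
			table[i] = v - cur + maxMatchOff
		}
	}
	return maxMatchOff // the new cursor
}

func main() {
	table := []int32{100, 5000, 9990}
	cur := rebase(table, 10000, 20, 8<<10)
	fmt.Println(table, cur) // [0 3192 8182] 8192
}
```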
- const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. 
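The `s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))` line is the accelerating skip: the longer the scan runs past the last emitted literal without finding a match, the larger the forward step. A tiny sketch with the dfast constants (the plain fast encoder below uses stepSize 2 and kSearchStrength 6, its dict variant 7):

```go
package main

import "fmt"

// nextProbe reproduces the skip heuristic from the search loops above.
func nextProbe(s, nextEmit int32) int32 {
	const stepSize = 1
	const kSearchStrength = 8
	return s + stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
}

func main() {
	for _, s := range []int32{0, 128, 512, 1024} {
		fmt.Printf("step at s=%d: %d bytes\n", s, nextProbe(s, 0)-s)
	}
	// step at s=0: 1, s=128: 2, s=512: 5, s=1024: 9
}
```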
- // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. 
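After a forward match is found, the loop above walks `s` and `t` backwards while the preceding bytes agree, converting pending literals into extra match length. A sketch of just that step; note the history-aware variants additionally cap `l` at `maxMatchLength`, which this sketch omits as the NoHist variants do:

```go
package main

import "fmt"

// extendBackwards steps a found match (s, t, l) backwards while the
// preceding bytes agree. tMin bounds the window; nextEmit bounds the
// literals that may be reclaimed into the match.
func extendBackwards(src []byte, s, t, l, tMin, nextEmit int32) (int32, int32, int32) {
	for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
		s--
		t--
		l++
	}
	return s, t, l
}

func main() {
	src := []byte("XabcdQ...Xabcd")
	// Suppose the forward search matched "abcd" at s=10 against t=1.
	s, t, l := extendBackwards(src, 10, 1, 4, 0, 6)
	fmt.Println(s, t, l) // 9 0 5: the leading 'X' was reclaimed
}
```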
- offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - if e.cur >= e.bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - for { - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if len(blk.sequences) > 2 { - if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if len(blk.sequences) <= 2 { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < e.bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
- t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - e.markLongShardDirty(nextHashL) - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) - e.longTable[longHash1] = te0 - e.longTable[longHash2] = te1 - e.markLongShardDirty(longHash1) - e.markLongShardDirty(longHash2) - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) - hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) - e.table[hashVal1] = te0 - e.markShardDirty(hashVal1) - e.table[hashVal2] = te1 - e.markShardDirty(hashVal2) - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // If we encoded more than 64K mark all dirty. 
- if len(src) > 64<<10 { - e.markAllShardsDirty() - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { - e.fastEncoder.Reset(d, singleBlock) - if d != nil { - panic("doubleFastEncoder: Reset with dict not supported") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { - allDirty := e.allDirty - e.fastEncoderDict.Reset(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]tableEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: e.maxMatchOff, - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: i, - } - } - } - e.lastDictID = d.id - allDirty = true - } - // Reset table to initial state - e.cur = e.maxMatchOff - - dirtyShardCnt := 0 - if !allDirty { - for i := range e.longTableShardDirty { - if e.longTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - //copy(e.longTable[:], e.dictLongTable) - e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - return - } - for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) - *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) - - e.longTableShardDirty[i] = false - } -} - -func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/dLongTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go deleted file mode 100644 index f45a3da7da..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ /dev/null @@ -1,891 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" -) - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table - tableShardSize = tableSize / tableShardCnt // Size of an individual shard - tableFastHashLen = 6 - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
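The `*(*[dFastLongTableSize]tableEntry)(e.dictLongTable)` assignment in the Reset above replaces the commented-out `copy` with a slice-to-array-pointer conversion (Go 1.17+), turning the restore into one bulk array assignment. A sketch of the idiom with a toy size:

```go
package main

import "fmt"

const tableSize = 4 // illustrative; the real long table holds 1<<17 entries

type tableEntry struct {
	val    uint32
	offset int32
}

func main() {
	dict := make([]tableEntry, tableSize)
	for i := range dict {
		dict[i] = tableEntry{val: uint32(i), offset: int32(i)}
	}

	var table [tableSize]tableEntry
	// One bulk assignment via slice-to-array-pointer conversion.
	// It panics if len(dict) < tableSize, which the Reset callers
	// guarantee by allocating the dict table at the same length.
	table = *(*[tableSize]tableEntry)(dict)
	fmt.Println(table) // [{0 0} {1 1} {2 2} {3 3}]
}
```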
- maxMatchLength = 131074 -) - -type tableEntry struct { - val uint32 - offset int32 -} - -type fastEncoder struct { - fastBase - table [tableSize]tableEntry -} - -type fastEncoderDict struct { - fastEncoder - dictTable []tableEntry - tableShardDirty [tableShardCnt]bool - allDirty bool -} - -// Encode mimmics functionality in zstd_fast.c -func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
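The fast encoder's inner loop amortizes one 8-byte load across three probes: the candidate at `s` uses the low 32 bits of `cv`, the candidate at `s+1` uses `cv>>8`, and the repeat check at `repIndex = s - offset1 + 2` compares against `cv>>16`. A sketch confirming the shifted views equal fresh 4-byte loads at those positions:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	src := []byte("0123456789")
	s := 1
	cv := binary.LittleEndian.Uint64(src[s:])

	at := func(i int) uint32 { return binary.LittleEndian.Uint32(src[i:]) }
	fmt.Println(uint32(cv) == at(s))       // true: bytes at s
	fmt.Println(uint32(cv>>8) == at(s+1))  // true: bytes at s+1
	fmt.Println(uint32(cv>>16) == at(s+2)) // true: bytes at s+2
}
```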
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if debugEncoder { - if len(src) > maxCompressedBlockSize { - panic("src too big") - } - } - - // Protect against e.cur wraparound. - if e.cur >= e.bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - - for { - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0 ", t)) - } - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < e.bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if e.allDirty || len(src) > 32<<10 { - e.fastEncoder.Encode(blk, src) - e.allDirty = true - return - } - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [tableSize]tableEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 7 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - e.markShardDirty(nextHash2) - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("fastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - if true { - end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 2 { - const hashLog = tableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 - nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - - e.cur = e.maxMatchOff - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.tableShardDirty { - if e.tableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - const shardCnt = tableShardCnt - const shardSize = tableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - //copy(e.table[:], e.dictTable) - e.table = *(*[tableSize]tableEntry)(e.dictTable) - for i := range e.tableShardDirty { - e.tableShardDirty[i] = false - } - e.allDirty = false - return - } - for i := range e.tableShardDirty { - if !e.tableShardDirty[i] { - continue - } - - //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) - e.tableShardDirty[i] = false - } - e.allDirty = false -} - -func (e *fastEncoderDict) markAllShardsDirty() { - e.allDirty = true -} - -func (e *fastEncoderDict) markShardDirty(entryNum uint32) { - e.tableShardDirty[entryNum/tableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go deleted file mode 100644 index 72af7ef0fe..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ /dev/null @@ -1,619 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "crypto/rand" - "fmt" - "io" - "math" - rdebug "runtime/debug" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Encoder provides encoding to Zstandard. -// An Encoder can be used for either compressing a stream via the -// io.WriteCloser interface supported by the Encoder or as multiple independent -// tasks via the EncodeAll function. -// Smaller encodes are encouraged to use the EncodeAll function. -// Use NewWriter to create a new instance. 
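The Reset methods keep a dirty flag per table shard and choose between one full restore and per-shard restores: this variant switches to a full copy past 4/6 dirty shards, while the dfast dictionary encoder above used a 1/2 threshold. A sketch of the policy with toy sizes:

```go
package main

import "fmt"

const (
	shardCnt  = 8 // illustrative; the vendored tables use 1<<(tableBits-dictShardBits)
	shardSize = 4
	tableSize = shardCnt * shardSize
)

// restore copies the dictionary snapshot back into the working table:
// wholesale if most shards were touched, otherwise shard by shard.
func restore(table, dictTable []int32, dirty *[shardCnt]bool, allDirty bool) {
	dirtyCnt := 0
	for _, d := range dirty {
		if d {
			dirtyCnt++
		}
	}
	if allDirty || dirtyCnt > shardCnt*4/6 {
		copy(table, dictTable)
	} else {
		for i, d := range dirty {
			if d {
				copy(table[i*shardSize:(i+1)*shardSize], dictTable[i*shardSize:(i+1)*shardSize])
			}
		}
	}
	*dirty = [shardCnt]bool{}
}

func main() {
	table := make([]int32, tableSize)
	dictTable := make([]int32, tableSize)
	for i := range dictTable {
		dictTable[i] = int32(i)
	}
	dirty := [shardCnt]bool{2: true}
	restore(table, dictTable, &dirty, false)
	fmt.Println(table[8:12]) // only shard 2 restored: [8 9 10 11]
}
```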
-type Encoder struct { - o encoderOptions - encoders chan encoder - state encoderState - init sync.Once -} - -type encoder interface { - Encode(blk *blockEnc, src []byte) - EncodeNoHist(blk *blockEnc, src []byte) - Block() *blockEnc - CRC() *xxhash.Digest - AppendCRC([]byte) []byte - WindowSize(size int64) int32 - UseBlock(*blockEnc) - Reset(d *dict, singleBlock bool) -} - -type encoderState struct { - w io.Writer - filling []byte - current []byte - previous []byte - encoder encoder - writing *blockEnc - err error - writeErr error - nWritten int64 - nInput int64 - frameContentSize int64 - headerWritten bool - eofWritten bool - fullFrameWritten bool - - // This waitgroup indicates an encode is running. - wg sync.WaitGroup - // This waitgroup indicates we have a block encoding/writing. - wWg sync.WaitGroup -} - -// NewWriter will create a new Zstandard encoder. -// If the encoder will be used for encoding blocks a nil writer can be used. -func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { - initPredefined() - var e Encoder - e.o.setDefault() - for _, o := range opts { - err := o(&e.o) - if err != nil { - return nil, err - } - } - if w != nil { - e.Reset(w) - } - return &e, nil -} - -func (e *Encoder) initialize() { - if e.o.concurrent == 0 { - e.o.setDefault() - } - e.encoders = make(chan encoder, e.o.concurrent) - for i := 0; i < e.o.concurrent; i++ { - enc := e.o.encoder() - e.encoders <- enc - } -} - -// Reset will re-initialize the writer and new writes will encode to the supplied writer -// as a new, independent stream. -func (e *Encoder) Reset(w io.Writer) { - s := &e.state - s.wg.Wait() - s.wWg.Wait() - if cap(s.filling) == 0 { - s.filling = make([]byte, 0, e.o.blockSize) - } - if e.o.concurrent > 1 { - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) - } - s.current = s.current[:0] - s.previous = s.previous[:0] - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() - } - if s.encoder == nil { - s.encoder = e.o.encoder() - } - s.filling = s.filling[:0] - s.encoder.Reset(e.o.dict, false) - s.headerWritten = false - s.eofWritten = false - s.fullFrameWritten = false - s.w = w - s.err = nil - s.nWritten = 0 - s.nInput = 0 - s.writeErr = nil - s.frameContentSize = 0 -} - -// ResetContentSize will reset and set a content size for the next stream. -// If the bytes written does not match the size given an error will be returned -// when calling Close(). -// This is removed when Reset is called. -// Sizes <= 0 results in no content size set. -func (e *Encoder) ResetContentSize(w io.Writer, size int64) { - e.Reset(w) - if size >= 0 { - e.state.frameContentSize = size - } -} - -// Write data to the encoder. -// Input data will be buffered and as the buffer fills up -// content will be compressed and written to the output. -// When done writing, use Close to flush the remaining output -// and write CRC if requested. -func (e *Encoder) Write(p []byte) (n int, err error) { - s := &e.state - for len(p) > 0 { - if len(p)+len(s.filling) < e.o.blockSize { - if e.o.crc { - _, _ = s.encoder.CRC().Write(p) - } - s.filling = append(s.filling, p...) - return n + len(p), nil - } - add := p - if len(p)+len(s.filling) > e.o.blockSize { - add = add[:e.o.blockSize-len(s.filling)] - } - if e.o.crc { - _, _ = s.encoder.CRC().Write(add) - } - s.filling = append(s.filling, add...) 
- p = p[len(add):] - n += len(add) - if len(s.filling) < e.o.blockSize { - return n, nil - } - err := e.nextBlock(false) - if err != nil { - return n, err - } - if debugAsserts && len(s.filling) > 0 { - panic(len(s.filling)) - } - } - return n, nil -} - -// nextBlock will synchronize and start compressing input in e.state.filling. -// If an error has occurred during encoding it will be returned. -func (e *Encoder) nextBlock(final bool) error { - s := &e.state - // Wait for current block. - s.wg.Wait() - if s.err != nil { - return s.err - } - if len(s.filling) > e.o.blockSize { - return fmt.Errorf("block > maxStoreBlockSize") - } - if !s.headerWritten { - // If we have a single block encode, do a sync compression. - if final && len(s.filling) == 0 && !e.o.fullZero { - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) - var n2 int - n2, s.err = s.w.Write(s.current) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - s.nInput += int64(len(s.filling)) - s.current = s.current[:0] - s.filling = s.filling[:0] - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - - var tmp [maxHeaderSize]byte - fh := frameHeader{ - ContentSize: uint64(s.frameContentSize), - WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), - SingleSegment: false, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - dst := fh.appendTo(tmp[:0]) - s.headerWritten = true - s.wWg.Wait() - var n2 int - n2, s.err = s.w.Write(dst) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - } - if s.eofWritten { - // Ensure we only write it once. - final = false - } - - if len(s.filling) == 0 { - // Final block, but no data. - if final { - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - blk.last = true - blk.encodeRaw(nil) - s.wWg.Wait() - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.eofWritten = true - } - return s.err - } - - // SYNC: - if e.o.concurrent == 1 { - src := s.filling - s.nInput += int64(len(s.filling)) - if debugEncoder { - println("Adding sync block,", len(src), "bytes, final:", final) - } - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - - s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if s.err != nil { - return s.err - } - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.filling = s.filling[:0] - return s.err - } - - // Move blocks forward. - s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current - s.nInput += int64(len(s.current)) - s.wg.Add(1) - go func(src []byte) { - if debugEncoder { - println("Adding block,", len(src), "bytes, final:", final) - } - defer func() { - if r := recover(); r != nil { - s.err = fmt.Errorf("panic while encoding: %v", r) - rdebug.PrintStack() - } - s.wg.Done() - }() - enc := s.encoder - blk := enc.Block() - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - // Wait for pending writes. - s.wWg.Wait() - if s.writeErr != nil { - s.err = s.writeErr - return - } - // Transfer encoders from previous write block. - blk.swapEncoders(s.writing) - // Transfer recent offsets to next. 
- enc.UseBlock(s.writing) - s.writing = blk - s.wWg.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) - rdebug.PrintStack() - } - s.wWg.Done() - }() - s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if s.writeErr != nil { - return - } - _, s.writeErr = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - }() - }(s.current) - return nil -} - -// ReadFrom reads data from r until EOF or error. -// The return value n is the number of bytes read. -// Any error except io.EOF encountered during the read is also returned. -// -// The Copy function uses ReaderFrom if available. -func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debugEncoder { - println("Using ReadFrom") - } - - // Flush any current writes. - if len(e.state.filling) > 0 { - if err := e.nextBlock(false); err != nil { - return 0, err - } - } - e.state.filling = e.state.filling[:e.o.blockSize] - src := e.state.filling - for { - n2, err := r.Read(src) - if e.o.crc { - _, _ = e.state.encoder.CRC().Write(src[:n2]) - } - // src is now the unfilled part... - src = src[n2:] - n += int64(n2) - switch err { - case io.EOF: - e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debugEncoder { - println("ReadFrom: got EOF final block:", len(e.state.filling)) - } - return n, nil - case nil: - default: - if debugEncoder { - println("ReadFrom: got error:", err) - } - e.state.err = err - return n, err - } - if len(src) > 0 { - if debugEncoder { - println("ReadFrom: got space left in source:", len(src)) - } - continue - } - err = e.nextBlock(false) - if err != nil { - return n, err - } - e.state.filling = e.state.filling[:e.o.blockSize] - src = e.state.filling - } -} - -// Flush will send the currently written data to output -// and block until everything has been written. -// This should only be used on rare occasions where pushing the currently queued data is critical. -func (e *Encoder) Flush() error { - s := &e.state - if len(s.filling) > 0 { - err := e.nextBlock(false) - if err != nil { - return err - } - } - s.wg.Wait() - s.wWg.Wait() - if s.err != nil { - return s.err - } - return s.writeErr -} - -// Close will flush the final output and close the stream. -// The function will block until everything has been written. -// The Encoder can still be re-used after calling this. -func (e *Encoder) Close() error { - s := &e.state - if s.encoder == nil { - return nil - } - err := e.nextBlock(true) - if err != nil { - return err - } - if s.frameContentSize > 0 { - if s.nInput != s.frameContentSize { - return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) - } - } - if e.state.fullFrameWritten { - return s.err - } - s.wg.Wait() - s.wWg.Wait() - - if s.err != nil { - return s.err - } - if s.writeErr != nil { - return s.writeErr - } - - // Write CRC - if e.o.crc && s.err == nil { - // heap alloc. - var tmp [4]byte - _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) - s.nWritten += 4 - } - - // Add padding with content from crypto/rand.Reader - if s.err == nil && e.o.pad > 0 { - add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) - frame, err := skippableFrame(s.filling[:0], add, rand.Reader) - if err != nil { - return err - } - _, s.err = s.w.Write(frame) - } - return s.err -} - -// EncodeAll will encode all input in src and append it to dst. -// This function can be called concurrently, but each call will only run on a single goroutine. 
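This hunk only drops ko's vendored copy; NewWriter, Write, ReadFrom, Flush and Close remain the package's public streaming surface. A minimal sketch of that call pattern, assuming the canonical import path github.com/klauspost/compress/zstd rather than the vendored one:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
	// io.Copy sees that *zstd.Encoder implements io.ReaderFrom and takes the
	// ReadFrom path above, filling whole blocks straight from the reader.
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		log.Fatal(err)
	}
	// Close flushes the final block and the CRC; the Encoder stays reusable.
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
}
```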
-// If empty input is given, nothing is returned, unless WithZeroFrames is specified. -// Encoded blocks can be concatenated and the result will be the combined input stream. -// Data compressed with EncodeAll can be decoded with the Decoder, -// using either a stream or DecodeAll. -func (e *Encoder) EncodeAll(src, dst []byte) []byte { - if len(src) == 0 { - if e.o.fullZero { - // Add frame header. - fh := frameHeader{ - ContentSize: 0, - WindowSize: MinWindowSize, - SingleSegment: true, - // Adding a checksum would be a waste of space. - Checksum: false, - DictID: 0, - } - dst = fh.appendTo(dst) - - // Write raw block as last one only. - var blk blockHeader - blk.setSize(0) - blk.setType(blockTypeRaw) - blk.setLast(true) - dst = blk.appendTo(dst) - } - return dst - } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() - // Use single segments when above minimum window and below window size. - single := len(src) <= e.o.windowSize && len(src) > MinWindowSize - if e.o.single != nil { - single = *e.o.single - } - fh := frameHeader{ - ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(int64(len(src)))), - SingleSegment: single, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - // If less than 1MB, allocate a buffer up front. - if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { - dst = make([]byte, 0, len(src)) - } - dst = fh.appendTo(dst) - - // If we can do everything in one block, prefer that. - if len(src) <= e.o.blockSize { - enc.Reset(e.o.dict, true) - // Slightly faster with no history and everything in one block. - if e.o.crc { - _, _ = enc.CRC().Write(src) - } - blk := enc.Block() - blk.last = true - if e.o.dict == nil { - enc.EncodeNoHist(blk, src) - } else { - enc.Encode(blk, src) - } - - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - oldout := blk.output - // Output directly to dst - blk.output = dst - - err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if err != nil { - panic(err) - } - dst = blk.output - blk.output = oldout - } else { - enc.Reset(e.o.dict, false) - blk := enc.Block() - for len(src) > 0 { - todo := src - if len(todo) > e.o.blockSize { - todo = todo[:e.o.blockSize] - } - src = src[len(todo):] - if e.o.crc { - _, _ = enc.CRC().Write(todo) - } - blk.pushOffsets() - enc.Encode(blk, todo) - if len(src) == 0 { - blk.last = true - } - err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - if err != nil { - panic(err) - } - dst = append(dst, blk.output...) - blk.reset(nil) - } - } - if e.o.crc { - dst = enc.AppendCRC(dst) - } - // Add padding with content from crypto/rand.Reader - if e.o.pad > 0 { - add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) - var err error - dst, err = skippableFrame(dst, add, rand.Reader) - if err != nil { - panic(err) - } - } - return dst -} - -// MaxEncodedSize returns the expected maximum -// size of an encoded block or stream. -func (e *Encoder) MaxEncodedSize(size int) int { - frameHeader := 4 + 2 // magic + frame header & window descriptor - if e.o.dict != nil { - frameHeader += 4 - } - // Frame content size: - if size < 256 { - frameHeader++ - } else if size < 65536+256 { - frameHeader += 2 - } else if size < math.MaxInt32 { - frameHeader += 4 - } else { - frameHeader += 8 - } - // Final crc - if e.o.crc { - frameHeader += 4 - } - - // Max overhead is 3 bytes/block. 
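EncodeAll, deleted above, is the block-oriented half of the API: one complete frame per call, safe to call concurrently. A sketch of the usual round trip; DecodeAll lives on the package's Decoder (NewReader), which this hunk does not show:

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A nil writer is fine when the Encoder is only used for EncodeAll.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()
	compressed := enc.EncodeAll([]byte("some payload"), nil)

	dec, err := zstd.NewReader(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()
	plain, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", plain)
}
```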
- // There cannot be 0 blocks. - blocks := (size + e.o.blockSize) / e.o.blockSize - - // Combine, add padding. - maxSz := frameHeader + 3*blocks + size - if e.o.pad > 1 { - maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) - } - return maxSz -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go deleted file mode 100644 index faaf81921c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ /dev/null @@ -1,339 +0,0 @@ -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - "runtime" - "strings" -) - -// EOption is an option for creating a encoder. -type EOption func(*encoderOptions) error - -// options retains accumulated state of multiple options. -type encoderOptions struct { - concurrent int - level EncoderLevel - single *bool - pad int - blockSize int - windowSize int - crc bool - fullZero bool - noEntropy bool - allLitEntropy bool - customWindow bool - customALEntropy bool - customBlockSize bool - lowMem bool - dict *dict -} - -func (o *encoderOptions) setDefault() { - *o = encoderOptions{ - concurrent: runtime.GOMAXPROCS(0), - crc: true, - single: nil, - blockSize: maxCompressedBlockSize, - windowSize: 8 << 20, - level: SpeedDefault, - allLitEntropy: false, - lowMem: false, - } -} - -// encoder returns an encoder with the selected options. -func (o encoderOptions) encoder() encoder { - switch o.level { - case SpeedFastest: - if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - - case SpeedDefault: - if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} - } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - case SpeedBetterCompression: - if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - } - panic("unknown compression level") -} - -// WithEncoderCRC will add CRC value to output. -// Output will be 4 bytes larger. -func WithEncoderCRC(b bool) EOption { - return func(o *encoderOptions) error { o.crc = b; return nil } -} - -// WithEncoderConcurrency will set the concurrency, -// meaning the maximum number of encoders to run concurrently. -// The value supplied must be at least 1. -// For streams, setting a value of 1 will disable async compression. -// By default this will be set to GOMAXPROCS. 
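MaxEncodedSize, deleted just above, is a worst-case bound (frame header, up to 3 bytes of overhead per block, the input itself, plus padding), so it can pre-size a destination buffer that EncodeAll never has to grow. A small sketch; the package name zstdutil is hypothetical:

```go
package zstdutil

import "github.com/klauspost/compress/zstd"

// compressInto sizes dst by the worst-case bound so EncodeAll appends
// without ever reallocating.
func compressInto(enc *zstd.Encoder, payload []byte) []byte {
	dst := make([]byte, 0, enc.MaxEncodedSize(len(payload)))
	return enc.EncodeAll(payload, dst)
}
```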
-func WithEncoderConcurrency(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("concurrency must be at least 1") - } - o.concurrent = n - return nil - } -} - -// WithWindowSize will set the maximum allowed back-reference distance. -// The value must be a power of two between MinWindowSize and MaxWindowSize. -// A larger value will enable better compression but allocate more memory and, -// for above-default values, take considerably longer. -// The default value is determined by the compression level. -func WithWindowSize(n int) EOption { - return func(o *encoderOptions) error { - switch { - case n < MinWindowSize: - return fmt.Errorf("window size must be at least %d", MinWindowSize) - case n > MaxWindowSize: - return fmt.Errorf("window size must be at most %d", MaxWindowSize) - case (n & (n - 1)) != 0: - return errors.New("window size must be a power of 2") - } - - o.windowSize = n - o.customWindow = true - if o.blockSize > o.windowSize { - o.blockSize = o.windowSize - o.customBlockSize = true - } - return nil - } -} - -// WithEncoderPadding will add padding to all output so the size will be a multiple of n. -// This can be used to obfuscate the exact output size or make blocks of a certain size. -// The contents will be a skippable frame, so it will be invisible to the decoder. -// n must be > 0 and <= 1GB, 1<<30 bytes. -// The padded area will be filled with data from crypto/rand.Reader. -// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this. -func WithEncoderPadding(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("padding must be at least 1") - } - // No need to waste our time. - if n == 1 { - n = 0 - } - if n > 1<<30 { - return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)") - } - o.pad = n - return nil - } -} - -// EncoderLevel predefines encoder compression levels. -// Only use the constants made available, since the actual mapping -// of these values is very likely to change and your compression could change -// unpredictably when upgrading the library. -type EncoderLevel int - -const ( - speedNotSet EncoderLevel = iota - - // SpeedFastest will choose the fastest reasonable compression. - // This is roughly equivalent to the fastest Zstandard mode. - SpeedFastest - - // SpeedDefault is the default "pretty fast" compression option. - // This is roughly equivalent to the default Zstandard mode (level 3). - SpeedDefault - - // SpeedBetterCompression will yield better compression than the default. - // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. - // By using this, notice that CPU usage may go up in the future. - SpeedBetterCompression - - // SpeedBestCompression will choose the best available compression option. - // This will offer the best compression no matter the CPU cost. - SpeedBestCompression - - // speedLast should be kept as the last actual compression option. - // This is not for external usage, but is used to keep track of the valid options. - speedLast -) - -// EncoderLevelFromString will convert a string representation of an encoding level back -// to a compression level. The comparison is not case sensitive. -// If the string wasn't recognized, (false, SpeedDefault) will be returned.
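The options above compose at construction time. A sketch combining several of them, with the constraints their doc comments state; all values here are illustrative:

```go
package zstdutil

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// newTunedWriter composes several encoder options; the values are hypothetical.
func newTunedWriter(w io.Writer) (*zstd.Encoder, error) {
	return zstd.NewWriter(w,
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
		zstd.WithEncoderConcurrency(2), // a value of 1 disables async compression
		zstd.WithWindowSize(1<<20),     // must be a power of two in [MinWindowSize, MaxWindowSize]
		zstd.WithEncoderPadding(4096),  // output padded to a 4 KiB multiple via a skippable frame
	)
}
```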
-func EncoderLevelFromString(s string) (bool, EncoderLevel) { - for l := speedNotSet + 1; l < speedLast; l++ { - if strings.EqualFold(s, l.String()) { - return true, l - } - } - return false, SpeedDefault -} - -// EncoderLevelFromZstd will return an encoder level that closest matches the compression -// ratio of a specific zstd compression level. -// Many input values will provide the same compression level. -func EncoderLevelFromZstd(level int) EncoderLevel { - switch { - case level < 3: - return SpeedFastest - case level >= 3 && level < 6: - return SpeedDefault - case level >= 6 && level < 10: - return SpeedBetterCompression - default: - return SpeedBestCompression - } -} - -// String provides a string representation of the compression level. -func (e EncoderLevel) String() string { - switch e { - case SpeedFastest: - return "fastest" - case SpeedDefault: - return "default" - case SpeedBetterCompression: - return "better" - case SpeedBestCompression: - return "best" - default: - return "invalid" - } -} - -// WithEncoderLevel specifies a predefined compression level. -func WithEncoderLevel(l EncoderLevel) EOption { - return func(o *encoderOptions) error { - switch { - case l <= speedNotSet || l >= speedLast: - return fmt.Errorf("unknown encoder level") - } - o.level = l - if !o.customWindow { - switch o.level { - case SpeedFastest: - o.windowSize = 4 << 20 - if !o.customBlockSize { - o.blockSize = 1 << 16 - } - case SpeedDefault: - o.windowSize = 8 << 20 - case SpeedBetterCompression: - o.windowSize = 16 << 20 - case SpeedBestCompression: - o.windowSize = 32 << 20 - } - } - if !o.customALEntropy { - o.allLitEntropy = l > SpeedDefault - } - - return nil - } -} - -// WithZeroFrames will encode 0 length input as full frames. -// This can be needed for compatibility with zstandard usage, -// but is not needed for this package. -func WithZeroFrames(b bool) EOption { - return func(o *encoderOptions) error { - o.fullZero = b - return nil - } -} - -// WithAllLitEntropyCompression will apply entropy compression if no matches are found. -// Disabling this will skip incompressible data faster, but in cases with no matches but -// skewed character distribution compression is lost. -// Default value depends on the compression level selected. -func WithAllLitEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.customALEntropy = true - o.allLitEntropy = b - return nil - } -} - -// WithNoEntropyCompression will always skip entropy compression of literals. -// This can be useful if content has matches, but unlikely to benefit from entropy -// compression. Usually the slight speed improvement is not worth enabling this. -func WithNoEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.noEntropy = b - return nil - } -} - -// WithSingleSegment will set the "single segment" flag when EncodeAll is used. -// If this flag is set, data must be regenerated within a single continuous memory segment. -// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. -// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. -// In order to preserve the decoder from unreasonable memory requirements, -// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. -// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. 
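The two mapping helpers above tie the package's four levels to zstd's numeric levels. A quick demonstration of the switch shown above:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Per the switch above: <3 fastest, 3-5 default, 6-9 better, else best.
	for _, lvl := range []int{1, 3, 7, 19} {
		fmt.Println(lvl, "->", zstd.EncoderLevelFromZstd(lvl))
	}
	if ok, l := zstd.EncoderLevelFromString("better"); ok {
		fmt.Println("parsed:", l) // parsed: better
	}
}
```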
-// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size and the window size. -// This setting has no effect on streamed encodes. -func WithSingleSegment(b bool) EOption { - return func(o *encoderOptions) error { - o.single = &b - return nil - } -} - -// WithLowerEncoderMem will trade in some memory cases trade less memory usage for -// slower encoding speed. -// This will not change the window size which is the primary function for reducing -// memory usage. See WithWindowSize. -func WithLowerEncoderMem(b bool) EOption { - return func(o *encoderOptions) error { - o.lowMem = b - return nil - } -} - -// WithEncoderDict allows to register a dictionary that will be used for the encode. -// -// The slice dict must be in the [dictionary format] produced by -// "zstd --train" from the Zstandard reference implementation. -// -// The encoder *may* choose to use no dictionary instead for certain payloads. -// -// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format -func WithEncoderDict(dict []byte) EOption { - return func(o *encoderOptions) error { - d, err := loadDict(dict) - if err != nil { - return err - } - o.dict = d - return nil - } -} - -// WithEncoderDictRaw registers a dictionary that may be used by the encoder. -// -// The slice content may contain arbitrary data. It will be used as an initial -// history. -func WithEncoderDictRaw(id uint32, content []byte) EOption { - return func(o *encoderOptions) error { - if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { - return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) - } - o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go deleted file mode 100644 index 53e160f7e5..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "encoding/hex" - "errors" - "io" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type frameDec struct { - o decoderOptions - crc *xxhash.Digest - - WindowSize uint64 - - // Frame history passed between blocks - history history - - rawInput byteBuffer - - // Byte buffer that can be reused for small input blocks. - bBuf byteBuf - - FrameContentSize uint64 - - DictionaryID uint32 - HasCheckSum bool - SingleSegment bool -} - -const ( - // MinWindowSize is the minimum Window Size, which is 1 KB. - MinWindowSize = 1 << 10 - - // MaxWindowSize is the maximum encoder window size - // and the default decoder maximum window size. - MaxWindowSize = 1 << 29 -) - -const ( - frameMagic = "\x28\xb5\x2f\xfd" - skippableFrameMagic = "\x2a\x4d\x18" -) - -func newFrameDec(o decoderOptions) *frameDec { - if o.maxWindowSize > o.maxDecodedSize { - o.maxWindowSize = o.maxDecodedSize - } - d := frameDec{ - o: o, - } - return &d -} - -// reset will read the frame header and prepare for block decoding. -// If nothing can be read from the input, io.EOF will be returned. 
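WithEncoderDict, deleted above, only attaches the dictionary to the encoder; frames record the dictionary ID, so the decoder must register the same bytes (via WithDecoderDicts, a decoder option not shown in this hunk). A sketch, assuming dictBytes holds a dictionary in the `zstd --train` format and zstdutil is a hypothetical package:

```go
package zstdutil

import "github.com/klauspost/compress/zstd"

// roundTripWithDict assumes dictBytes is a trained dictionary; frames carry
// only its ID, so both sides must register the same bytes.
func roundTripWithDict(dictBytes, payload []byte) ([]byte, error) {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dictBytes))
	if err != nil {
		return nil, err
	}
	defer enc.Close()
	compressed := enc.EncodeAll(payload, nil)

	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dictBytes))
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(compressed, nil)
}
```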
-// Any other error indicated that the stream contained data, but -// there was a problem. -func (d *frameDec) reset(br byteBuffer) error { - d.HasCheckSum = false - d.WindowSize = 0 - var signature [4]byte - for { - var err error - // Check if we can read more... - b, err := br.readSmall(1) - switch err { - case io.EOF, io.ErrUnexpectedEOF: - return io.EOF - case nil: - signature[0] = b[0] - default: - return err - } - // Read the rest, don't allow io.ErrUnexpectedEOF - b, err = br.readSmall(3) - switch err { - case io.EOF: - return io.EOF - case nil: - copy(signature[1:], b) - default: - return err - } - - if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { - if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) - } - // Break if not skippable frame. - break - } - // Read size to skip - b, err = br.readSmall(4) - if err != nil { - if debugDecoder { - println("Reading Frame Size", err) - } - return err - } - n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - println("Skipping frame with", n, "bytes.") - err = br.skipN(int64(n)) - if err != nil { - if debugDecoder { - println("Reading discarded frame", err) - } - return err - } - } - if string(signature[:]) != frameMagic { - if debugDecoder { - println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) - } - return ErrMagicMismatch - } - - // Read Frame_Header_Descriptor - fhd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Frame_Header_Descriptor", err) - } - return err - } - d.SingleSegment = fhd&(1<<5) != 0 - - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - d.WindowSize = 0 - if !d.SingleSegment { - wd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Window_Descriptor", err) - } - return err - } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - d.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = 0 - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - - b, err := br.readSmall(int(size)) - if err != nil { - println("Reading Dictionary_ID", err) - return err - } - var id uint32 - switch len(b) { - case 1: - id = uint32(b[0]) - case 2: - id = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - if debugDecoder { - println("Dict size", size, "ID:", id) - } - d.DictionaryID = id - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if d.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - d.FrameContentSize = fcsUnknown - if fcsSize > 0 { - b, err := br.readSmall(fcsSize) - if err != nil { - println("Reading Frame content", err) - return err - } - switch len(b) { - case 1: - d.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. 
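The Window_Descriptor parse earlier in this hunk packs a 3-bit exponent over a 1 KB base and a 3-bit mantissa counted in eighths of that base. The same math as a standalone sketch:

```go
package main

import "fmt"

// windowSizeFromDescriptor mirrors the Window_Descriptor math in reset above:
// top 5 bits select the power-of-two base, low 3 bits add eighths of it.
func windowSizeFromDescriptor(wd byte) uint64 {
	windowLog := 10 + (wd >> 3)
	windowBase := uint64(1) << windowLog
	windowAdd := (windowBase / 8) * uint64(wd&0x7)
	return windowBase + windowAdd
}

func main() {
	fmt.Println(windowSizeFromDescriptor(0x00)) // 1024 (the 1 KB minimum)
	fmt.Println(windowSizeFromDescriptor(0x01)) // 1152 (1 KB + 1/8)
}
```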
- d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - if debugDecoder { - println("Read FCS:", d.FrameContentSize) - } - } - - // Move this to shared. - d.HasCheckSum = fhd&(1<<2) != 0 - if d.HasCheckSum { - if d.crc == nil { - d.crc = xxhash.New() - } - d.crc.Reset() - } - - if d.WindowSize > d.o.maxWindowSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrWindowSizeExceeded - } - - if d.WindowSize == 0 && d.SingleSegment { - // We may not need window in this case. - d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } - if d.WindowSize > d.o.maxDecodedSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrDecoderSizeExceeded - } - } - - // The minimum Window_Size is 1 KB. - if d.WindowSize < MinWindowSize { - if debugDecoder { - println("got window size: ", d.WindowSize) - } - return ErrWindowSizeTooSmall - } - d.history.windowSize = int(d.WindowSize) - if !d.o.lowMem || d.history.windowSize < maxBlockSize { - // Alloc 2x window size if not low-mem, or window size below 2MB. - d.history.allocFrameBuffer = d.history.windowSize * 2 - } else { - if d.o.lowMem { - // Alloc with 1MB extra. - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 - } else { - // Alloc with 2MB extra. - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize - } - } - - if debugDecoder { - println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) - } - - // history contains input - maybe we do something - d.rawInput = br - return nil -} - -// next will start decoding the next block from stream. -func (d *frameDec) next(block *blockDec) error { - if debugDecoder { - println("decoding new block") - } - err := block.reset(d.rawInput, d.WindowSize) - if err != nil { - println("block error:", err) - // Signal the frame decoder we have a problem. - block.sendErr(err) - return err - } - return nil -} - -// checkCRC will check the checksum, assuming the frame has one. -// Will return ErrCRCMismatch if crc check failed, otherwise nil. -func (d *frameDec) checkCRC() error { - // We can overwrite upper tmp now - buf, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } - - want := binary.LittleEndian.Uint32(buf[:4]) - got := uint32(d.crc.Sum64()) - - if got != want { - if debugDecoder { - printf("CRC check failed: got %08x, want %08x\n", got, want) - } - return ErrCRCMismatch - } - if debugDecoder { - printf("CRC ok %08x\n", got) - } - return nil -} - -// consumeCRC skips over the checksum, assuming the frame has one. -func (d *frameDec) consumeCRC() error { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - } - return err -} - -// runDecoder will run the decoder for the remainder of the frame. -func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { - saved := d.history.b - - // We use the history for output to avoid copying it. 
- d.history.b = dst - d.history.ignoreBuffer = len(dst) - // Store input length, so we only check new data. - crcStart := len(dst) - d.history.decoders.maxSyncLen = 0 - if d.o.limitToCap { - d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) - } - if d.FrameContentSize != fcsUnknown { - if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { - d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) - } - if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) - } - return dst, ErrDecoderSizeExceeded - } - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen) - } - if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { - // Alloc for output - dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - var err error - for { - err = dec.reset(d.rawInput, d.WindowSize) - if err != nil { - break - } - if debugDecoder { - println("next block:", dec) - } - err = dec.decodeBuf(&d.history) - if err != nil { - break - } - if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { - println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) - err = ErrDecoderSizeExceeded - break - } - if d.o.limitToCap && len(d.history.b) > cap(dst) { - println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) - err = ErrDecoderSizeExceeded - break - } - if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { - println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) - err = ErrFrameSizeExceeded - break - } - if dec.Last { - break - } - if debugDecoder { - println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) - } - } - dst = d.history.b - if err == nil { - if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { - err = ErrFrameSizeMismatch - } else if d.HasCheckSum { - if d.o.ignoreChecksum { - err = d.consumeCRC() - } else { - d.crc.Write(dst[crcStart:]) - err = d.checkCRC() - } - } - } - d.history.b = saved - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go deleted file mode 100644 index 2f5d5ed454..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "math/bits" -) - -type frameHeader struct { - ContentSize uint64 - WindowSize uint32 - SingleSegment bool - Checksum bool - DictID uint32 -} - -const maxHeaderSize = 14 - -func (f frameHeader) appendTo(dst []byte) []byte { - dst = append(dst, frameMagic...) 
- var fhd uint8 - if f.Checksum { - fhd |= 1 << 2 - } - if f.SingleSegment { - fhd |= 1 << 5 - } - - var dictIDContent []byte - if f.DictID > 0 { - var tmp [4]byte - if f.DictID < 256 { - fhd |= 1 - tmp[0] = uint8(f.DictID) - dictIDContent = tmp[:1] - } else if f.DictID < 1<<16 { - fhd |= 2 - binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) - dictIDContent = tmp[:2] - } else { - fhd |= 3 - binary.LittleEndian.PutUint32(tmp[:4], f.DictID) - dictIDContent = tmp[:4] - } - } - var fcs uint8 - if f.ContentSize >= 256 { - fcs++ - } - if f.ContentSize >= 65536+256 { - fcs++ - } - if f.ContentSize >= 0xffffffff { - fcs++ - } - - fhd |= fcs << 6 - - dst = append(dst, fhd) - if !f.SingleSegment { - const winLogMin = 10 - windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 - dst = append(dst, uint8(windowLog)) - } - if f.DictID > 0 { - dst = append(dst, dictIDContent...) - } - switch fcs { - case 0: - if f.SingleSegment { - dst = append(dst, uint8(f.ContentSize)) - } - // Unless SingleSegment is set, frame sizes < 256 are not stored. - case 1: - f.ContentSize -= 256 - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) - case 2: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) - case 3: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), - uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) - default: - panic("invalid fcs") - } - return dst -} - -const skippableFrameHeader = 4 + 4 - -// calcSkippableFrame will return a total size to be added for written - // to be divisible by multiple. - // The value will always be > skippableFrameHeader. - // The function will panic if written < 0 or wantMultiple <= 0. -func calcSkippableFrame(written, wantMultiple int64) int { - if wantMultiple <= 0 { - panic("wantMultiple <= 0") - } - if written < 0 { - panic("written < 0") - } - leftOver := written % wantMultiple - if leftOver == 0 { - return 0 - } - toAdd := wantMultiple - leftOver - for toAdd < skippableFrameHeader { - toAdd += wantMultiple - } - return int(toAdd) -} - -// skippableFrame will add a skippable frame with a total size of total bytes. -// total should be >= skippableFrameHeader and < math.MaxUint32. -func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { - if total == 0 { - return dst, nil - } - if total < skippableFrameHeader { - return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) - } - if int64(total) > math.MaxUint32 { - return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) - } - dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) - f := uint32(total - skippableFrameHeader) - dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) - start := len(dst) - dst = append(dst, make([]byte, f)...) - _, err := io.ReadFull(r, dst[start:]) - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go deleted file mode 100644 index 2f8860a722..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License.
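calcSkippableFrame above rounds the stream up to the next multiple while reserving room for the 8-byte skippable frame header. The same arithmetic as a standalone sketch:

```go
package main

import "fmt"

// padTo mirrors calcSkippableFrame above: how many bytes a skippable frame
// must add so written becomes a multiple of multiple; the result is either 0
// or at least the 8-byte header.
func padTo(written, multiple int64) int64 {
	leftOver := written % multiple
	if leftOver == 0 {
		return 0
	}
	toAdd := multiple - leftOver
	for toAdd < 8 { // 4-byte magic + 4-byte length
		toAdd += multiple
	}
	return toAdd
}

func main() {
	fmt.Println(padTo(4090, 4096)) // 6 < 8, so pad a full extra block: 4102
	fmt.Println(padTo(100, 512))   // 412
}
```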
- -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" -) - -const ( - tablelogAbsoluteMax = 9 -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = tablelogAbsoluteMax + 2 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - maxTableMask = (1 << maxTableLog) - 1 - minTablelog = 5 - maxSymbolValue = 255 -) - -// fseDecoder provides temporary storage for compression and decompression. -type fseDecoder struct { - dt [maxTablesize]decSymbol // Decompression table. - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - maxBits uint8 // Maximum number of additional bits - - // used for table creation to avoid allocations. - stateTable [256]uint16 - norm [maxSymbolValue + 1]int16 - preDefined bool -} - -// tableStep returns the next table index. -func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -// readNCount will read the symbol distribution so decoding tables can be constructed. -func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { - var ( - charnum uint16 - previous0 bool - ) - if b.remain() < 4 { - return errors.New("input too small") - } - bitStream := b.Uint32NC() - nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog - if nbBits > tablelogAbsoluteMax { - println("Invalid tablelog:", nbBits) - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 && charnum <= maxSymbol { - if previous0 { - //println("prev0") - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - //println("24 x 0") - n0 += 24 - if r := b.remain(); r > 5 { - b.advance(2) - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - // end of bit stream - bitStream >>= 16 - bitCount += 16 - } - } - //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) - for charnum < n0 { - s.norm[uint8(charnum)] = 0 - charnum++ - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*threshold - 1) - remaining - var count int32 - - if int32(bitStream)&(threshold-1) < max { - count = int32(bitStream) & (threshold - 1) - if debugAsserts && nbBits < 1 { - panic("nbBits underflow") - } - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - // extra accuracy - count-- - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - 
charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> (bitCount & 31) - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - bitStream = b.Uint32() >> (bitCount & 31) - } - } - s.symbolLen = charnum - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<<s.actualTableLog { - return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog) - } - b.advance((bitCount + 7) >> 3) - return s.buildDtable() -} - -func (s *fseDecoder) mustReadFrom(r io.Reader) { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - // dt [maxTablesize]decSymbol // Decompression table. - // symbolLen uint16 // Length of active part of the symbol table. - // actualTableLog uint8 // Selected tablelog. - // maxBits uint8 // Maximum number of additional bits - // // used for table creation to avoid allocations. - // stateTable [256]uint16 - // norm [maxSymbolValue + 1]int16 - // preDefined bool - fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) -} - -// decSymbol contains information about a state entry, -// including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -// Using a composite uint64 is faster than a struct with separate members. -type decSymbol uint64 - -func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { - return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - -func (d decSymbol) nbBits() uint8 { - return uint8(d) -} - -func (d decSymbol) addBits() uint8 { - return uint8(d >> 8) -} - -func (d decSymbol) newState() uint16 { - return uint16(d >> 16) -} - -func (d decSymbol) baselineInt() int { - return int(d >> 32) -} - -func (d *decSymbol) setNBits(nBits uint8) { - const mask = 0xffffffffffffff00 - *d = (*d & mask) | decSymbol(nBits) -} - -func (d *decSymbol) setAddBits(addBits uint8) { - const mask = 0xffffffffffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) -} - -func (d *decSymbol) setNewState(state uint16) { - const mask = 0xffffffff0000ffff - *d = (*d & mask) | decSymbol(state)<<16 -} - -func (d *decSymbol) setExt(addBits uint8, baseline uint32) { - const mask = 0xffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) -} - -// decSymbolValue returns the transformed decSymbol for the given symbol. -func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { - if int(symb) >= len(t) { - return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) - } - lu := t[symb] - return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil -} - -// setRLE will set the decoder to RLE mode.
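decSymbol above packs four decode-table fields into a single uint64 because that proved faster than a struct. A minimal standalone sketch of the layout and its accessors:

```go
package main

import "fmt"

// Same layout as decSymbol above: bits 0-7 nbBits, 8-15 addBits,
// 16-31 newState, 32-63 baseline.
type decSymbol uint64

func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
	return decSymbol(nbits) | decSymbol(addBits)<<8 | decSymbol(newState)<<16 | decSymbol(baseline)<<32
}

func main() {
	d := newDecSymbol(5, 3, 1234, 99)
	// Unpacking is a shift plus a narrowing conversion per field.
	fmt.Println(uint8(d), uint8(d>>8), uint16(d>>16), uint32(d>>32)) // 5 3 1234 99
}
```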
-func (s *fseDecoder) setRLE(symbol decSymbol) { - s.actualTableLog = 0 - s.maxBits = symbol.addBits() - s.dt[0] = symbol -} - -// transform will transform the decoder table into a table usable for -// decoding without having to apply the transformation while decoding. -// The state will contain the base value and the number of bits to read. -func (s *fseDecoder) transform(t []baseOffset) error { - tableSize := uint16(1 << s.actualTableLog) - s.maxBits = 0 - for i, v := range s.dt[:tableSize] { - add := v.addBits() - if int(add) >= len(t) { - return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) - } - lu := t[add] - if lu.addBits > s.maxBits { - s.maxBits = lu.addBits - } - v.setExt(lu.addBits, lu.baseLine) - s.dt[i] = v - } - return nil -} - -type fseState struct { - dt []decSymbol - state decSymbol -} - -// Initialize and decodeAsync first state and symbol. -func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { - s.dt = dt - br.fill() - s.state = dt[br.getBits(tableLog)] -} - -// final returns the current state symbol without decoding the next. -func (s decSymbol) final() (int, uint8) { - return s.baselineInt(), s.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go deleted file mode 100644 index d04a829b0a..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ /dev/null @@ -1,65 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" -) - -type buildDtableAsmContext struct { - // inputs - stateTable *uint16 - norm *int16 - dt *uint64 - - // outputs --- set by the procedure in the case of error; - // for interpretation please see the error handling part below - errParam1 uint64 - errParam2 uint64 -} - -// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. -// Function returns non-zero exit code on error. -// -//go:noescape -func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int - -// please keep in sync with _generate/gen_fse.go -const ( - errorCorruptedNormalizedCounter = 1 - errorNewStateTooBig = 2 - errorNewStateNoBits = 3 -) - -// buildDtable will build the decoding table. 
-func (s *fseDecoder) buildDtable() error { - ctx := buildDtableAsmContext{ - stateTable: &s.stateTable[0], - norm: &s.norm[0], - dt: (*uint64)(&s.dt[0]), - } - code := buildDtable_asm(s, &ctx) - - if code != 0 { - switch code { - case errorCorruptedNormalizedCounter: - position := ctx.errParam1 - return fmt.Errorf("corrupted input (position=%d, expected 0)", position) - - case errorNewStateTooBig: - newState := decSymbol(ctx.errParam1) - size := ctx.errParam2 - return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) - - case errorNewStateNoBits: - newState := decSymbol(ctx.errParam1) - oldState := decSymbol(ctx.errParam2) - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) - - default: - return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s deleted file mode 100644 index bcde398695..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s +++ /dev/null @@ -1,126 +0,0 @@ -// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int -TEXT ·buildDtable_asm(SB), $0-24 - MOVQ ctx+8(FP), CX - MOVQ s+0(FP), DI - - // Load values - MOVBQZX 4098(DI), DX - XORQ AX, AX - BTSQ DX, AX - MOVQ (CX), BX - MOVQ 16(CX), SI - LEAQ -1(AX), R8 - MOVQ 8(CX), CX - MOVWQZX 4096(DI), DI - - // End load values - // Init, lay down lowprob symbols - XORQ R9, R9 - JMP init_main_loop_condition - -init_main_loop: - MOVWQSX (CX)(R9*2), R10 - CMPW R10, $-1 - JNE do_not_update_high_threshold - MOVB R9, 1(SI)(R8*8) - DECQ R8 - MOVQ $0x0000000000000001, R10 - -do_not_update_high_threshold: - MOVW R10, (BX)(R9*2) - INCQ R9 - -init_main_loop_condition: - CMPQ R9, DI - JL init_main_loop - - // Spread symbols - // Calculate table step - MOVQ AX, R9 - SHRQ $0x01, R9 - MOVQ AX, R10 - SHRQ $0x03, R10 - LEAQ 3(R9)(R10*1), R9 - - // Fill add bits values - LEAQ -1(AX), R10 - XORQ R11, R11 - XORQ R12, R12 - JMP spread_main_loop_condition - -spread_main_loop: - XORQ R13, R13 - MOVWQSX (CX)(R12*2), R14 - JMP spread_inner_loop_condition - -spread_inner_loop: - MOVB R12, 1(SI)(R11*8) - -adjust_position: - ADDQ R9, R11 - ANDQ R10, R11 - CMPQ R11, R8 - JG adjust_position - INCQ R13 - -spread_inner_loop_condition: - CMPQ R13, R14 - JL spread_inner_loop - INCQ R12 - -spread_main_loop_condition: - CMPQ R12, DI - JL spread_main_loop - TESTQ R11, R11 - JZ spread_check_ok - MOVQ ctx+8(FP), AX - MOVQ R11, 24(AX) - MOVQ $+1, ret+16(FP) - RET - -spread_check_ok: - // Build Decoding table - XORQ DI, DI - -build_table_main_table: - MOVBQZX 1(SI)(DI*8), CX - MOVWQZX (BX)(CX*2), R8 - LEAQ 1(R8), R9 - MOVW R9, (BX)(CX*2) - MOVQ R8, R9 - BSRQ R9, R9 - MOVQ DX, CX - SUBQ R9, CX - SHLQ CL, R8 - SUBQ AX, R8 - MOVB CL, (SI)(DI*8) - MOVW R8, 2(SI)(DI*8) - CMPQ R8, AX - JLE build_table_check1_ok - MOVQ ctx+8(FP), CX - MOVQ R8, 24(CX) - MOVQ AX, 32(CX) - MOVQ $+2, ret+16(FP) - RET - -build_table_check1_ok: - TESTB CL, CL - JNZ build_table_check2_ok - CMPW R8, DI - JNE build_table_check2_ok - MOVQ ctx+8(FP), AX - MOVQ R8, 24(AX) - MOVQ DI, 32(AX) - MOVQ $+3, ret+16(FP) - RET - -build_table_check2_ok: - INCQ DI - CMPQ DI, AX - JL build_table_main_table - MOVQ $+0, ret+16(FP) - RET diff --git 
a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go deleted file mode 100644 index 332e51fe44..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "errors" - "fmt" -) - -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) - } - } - } - - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go deleted file mode 100644 index ab26326a8f..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" -) - -const ( - // For encoding we only support up to - maxEncTableLog = 8 - maxEncTablesize = 1 << maxTableLog - maxEncTableMask = (1 << maxTableLog) - 1 - minEncTablelog = 5 - maxEncSymbolValue = maxMatchLengthSymbol -) - -// Scratch provides temporary storage for compression and decompression. -type fseEncoder struct { - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - ct cTable // Compression tables. - maxCount int // count of the most probable symbol - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - useRLE bool // This encoder is for RLE - preDefined bool // This encoder is predefined. - reUsed bool // Set to know when the encoder has been reused. - rleVal uint8 // RLE Symbol - maxBits uint8 // Maximum output bits after transform. 
- - // TODO: Technically zstd should be fine with 64 bytes. - count [256]uint32 - norm [256]int16 -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaNbBits uint32 - deltaFindState int16 - outBits uint8 -} - -// String prints values as a human-readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) -} - -// Histogram allows populating the histogram and skipping that step during compression. -// It otherwise allows inspecting the histogram when compression is done. -// To indicate that you have populated the histogram, call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -func (s *fseEncoder) Histogram() *[256]uint32 { - return &s.count -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// allocCtable will allocate tables needed for compression. -// If existing tables are big enough, they are simply re-used. -func (s *fseEncoder) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. -func (s *fseEncoder) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [256]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output.
- largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = total - 1 - total++ - default: - maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = total - v - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -func (s *fseEncoder) setRLE(val byte) { - s.allocCtable() - s.actualTableLog = 0 - s.ct.stateTable = s.ct.stateTable[:1] - s.ct.symbolTT[val] = symbolTransform{ - deltaFindState: 0, - deltaNbBits: 0, - } - if debugEncoder { - println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) - } - s.rleVal = val - s.useRLE = true -} - -// setBits will set output bits for the transform. -// if nil is provided, the number of bits is equal to the index. -func (s *fseEncoder) setBits(transform []byte) { - if s.reUsed || s.preDefined { - return - } - if s.useRLE { - if transform == nil { - s.ct.symbolTT[s.rleVal].outBits = s.rleVal - s.maxBits = s.rleVal - return - } - s.maxBits = transform[s.rleVal] - s.ct.symbolTT[s.rleVal].outBits = s.maxBits - return - } - if transform == nil { - for i := range s.ct.symbolTT[:s.symbolLen] { - s.ct.symbolTT[i].outBits = uint8(i) - } - s.maxBits = uint8(s.symbolLen - 1) - return - } - s.maxBits = 0 - for i, v := range transform[:s.symbolLen] { - s.ct.symbolTT[i].outBits = v - if v > s.maxBits { - // We could assume bits always going up, but we play safe. - s.maxBits = v - } - } -} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -// If successful, compression tables will also be made ready. 
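As a rough editorial sketch of that goal (hypothetical counts, not the vendored logic): scale each count so the total is exactly 1 << tableLog, keep at least one slot for every observed symbol, and give the rounding remainder to the most probable symbol. The vendored normalizeCount that follows does this more carefully, including the -1 low-probability marker and the normalizeCount2 fallback.

```
package main

import "fmt"

func main() {
	counts := []int{90, 8, 2} // hypothetical histogram, total = 100
	const tableSize = 1 << 6  // normalized counts must sum to this

	total := 0
	for _, c := range counts {
		total += c
	}
	norm := make([]int, len(counts))
	sum := 0
	for i, c := range counts {
		norm[i] = c * tableSize / total // naive proportional scaling
		if norm[i] == 0 {
			norm[i] = 1 // every observed symbol keeps a slot
		}
		sum += norm[i]
	}
	norm[0] += tableSize - sum // give the rounding remainder to the largest symbol
	fmt.Println(norm)          // [58 5 1], summing to 64
}
```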
-func (s *fseEncoder) normalizeCount(length int) error { - if s.reUsed { - return nil - } - s.optimalTableLog(length) - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(length) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(length >> tableLog) - ) - if s.maxCount == length { - s.useRLE = true - return nil - } - s.useRLE = false - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - err := s.normalizeCount2(length) - if err != nil { - return err - } - if debugAsserts { - err = s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() - } - s.norm[largest] += stillToDistribute - if debugAsserts { - err := s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() -} - -// Secondary normalization method. -// To be used when primary method fails. -func (s *fseEncoder) normalizeCount2(length int) error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(length) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - 
sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *fseEncoder) optimalTableLog(length int) { - tableLog := uint8(maxEncTableLog) - minBitsSrc := highBit(uint32(length)) + 1 - minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 - minBits := uint8(minBitsSymbols) - if minBitsSrc < minBitsSymbols { - minBits = uint8(minBitsSrc) - } - - maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minEncTablelog { - tableLog = minEncTablelog - } - if tableLog > maxEncTableLog { - tableLog = maxEncTableLog - } - s.actualTableLog = tableLog -} - -// validateNorm validates the normalized histogram table. -func (s *fseEncoder) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) - } - for i, v := range s.norm[s.symbolLen:] { - if v != 0 { - return fmt.Errorf("warning: Found symbol out of range, %d after cut", i) - } - } - return nil -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. -func (s *fseEncoder) writeCount(out []byte) ([]byte, error) { - if s.useRLE { - return append(out, s.rleVal), nil - } - if s.preDefined || s.reUsed { - // Never write predefined. - return out, nil - } - - var ( - tableLog = s.actualTableLog - tableSize = 1 << tableLog - previous0 bool - charnum uint16 - - // maximum header size plus 2 extra bytes for final output if bitCount == 0. - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2 - - // Write Table Size - bitStream = uint32(tableLog - minEncTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - outP = len(out) - ) - if cap(out) < outP+maxHeaderSize { - out = append(out, make([]byte, maxHeaderSize*3)...) - out = out[:len(out)-maxHeaderSize*3] - } - out = out[:outP+maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return nil, errors.New("internal error: remaining < 1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - if outP+2 > len(out) { - return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) - } - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += int((bitCount + 7) / 8) - - if charnum > s.symbolLen { - return nil, errors.New("internal error: charnum > s.symbolLen") - } - return out[:outP], nil -} - -// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) -// note 1 : assume symbolValue is valid (<= maxSymbolValue) -// note 2 : if freq[symbolValue]==0, returns a fake cost of tableLog+1 bits -func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { - minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 - threshold := (minNbBits + 1) << 16 - if debugAsserts { - if !(s.actualTableLog < 16) { - panic("!s.actualTableLog < 16") - } - // ensure enough room for renormalization double shift - if !(uint8(accuracyLog) < 31-s.actualTableLog) { - panic("!uint8(accuracyLog) < 31-s.actualTableLog") - } - } - tableSize := uint32(1) << s.actualTableLog - deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) - // linear interpolation (very approximate) - normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog - bitMultiplier := uint32(1) << accuracyLog - if debugAsserts { - if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { - panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") - } - if normalizedDeltaFromThreshold > bitMultiplier { - panic("normalizedDeltaFromThreshold > bitMultiplier") - } - } - return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold -} - -// Returns the cost in bits of encoding the distribution in count using ctable. -// Histogram should only be up to the last non-zero symbol. -// Returns math.MaxUint32 if ctable cannot represent all the symbols in count. -func (s *fseEncoder) approxSize(hist []uint32) uint32 { - if int(s.symbolLen) < len(hist) { - // More symbols than we have. - return math.MaxUint32 - } - if s.useRLE { - // We will never reuse RLE encoders. - return math.MaxUint32 - } - const kAccuracyLog = 8 - badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog - var cost uint32 - for i, v := range hist { - if v == 0 { - continue - } - if s.norm[i] == 0 { - return math.MaxUint32 - } - bitCost := s.bitCost(uint8(i), kAccuracyLog) - if bitCost > badCost { - return math.MaxUint32 - } - cost += v * bitCost - } - return cost >> kAccuracyLog -} - -// maxHeaderSize returns the maximum header size in bits. -// This is not the exact size, but we want a penalty for new tables anyway. -func (s *fseEncoder) maxHeaderSize() uint32 { - if s.preDefined { - return 0 - } - if s.useRLE { - return 8 - } - return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 -} - -// cState contains the compression state of a stream.
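A note on the fixed-point convention used by bitCost and approxSize above: costs carry accuracyLog fractional bits, so with kAccuracyLog = 8 a per-occurrence cost of 300 represents 300/256, roughly 1.17 bits. A toy check with hypothetical numbers (not the library's real tables):

```
package main

import "fmt"

func main() {
	const kAccuracyLog = 8 // fractional bits, as in approxSize

	cost := uint32(300) // hypothetical fixed-point cost of one symbol occurrence
	fmt.Printf("%.2f bits per occurrence\n", float64(cost)/(1<<kAccuracyLog)) // 1.17

	// approxSize sums count*cost, then shifts the fraction away at the end.
	counts := []uint32{10, 4} // hypothetical histogram entries
	var total uint32
	for _, v := range counts {
		total += v * cost
	}
	fmt.Println("approximate size:", total>>kAccuracyLog, "bits") // (10+4)*300/256 = 16
}
```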
-type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - if len(c.stateTable) == 1 { - // RLE - c.stateTable[0] = uint16(0) - c.state = 0 - return - } - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + int32(first.deltaFindState) - c.state = c.stateTable[lu] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go deleted file mode 100644 index 474cb77d2b..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math" - "sync" -) - -var ( - // fsePredef are the predefined fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredef [3]fseDecoder - - // fsePredefEnc are the predefined encoder based on fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredefEnc [3]fseEncoder - - // symbolTableX contain the transformations needed for each type as defined in - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - symbolTableX [3][]baseOffset - - // maxTableSymbol is the biggest supported symbol for each table type - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} - - // bitTables is the bits table for each table. - bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} -) - -type tableIndex uint8 - -const ( - // indexes for fsePredef and symbolTableX - tableLiteralLengths tableIndex = 0 - tableOffsets tableIndex = 1 - tableMatchLengths tableIndex = 2 - - maxLiteralLengthSymbol = 35 - maxOffsetLengthSymbol = 30 - maxMatchLengthSymbol = 52 -) - -// baseOffset is used for calculating transformations. -type baseOffset struct { - baseLine uint32 - addBits uint8 -} - -// fillBase will precalculate base offsets with the given bit distributions. 
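Before the implementation, a hedged sketch of how such a (baseLine, addBits) pair is consumed at decode time; the decode helper is assumed for illustration, and only the baseOffset shape matches the vendored type:

```
package main

import "fmt"

type baseOffset struct {
	baseLine uint32
	addBits  uint8
}

// decode is an assumed helper, not part of the vendored package:
// the actual value is the baseline plus addBits extra bits read from the stream.
func decode(bo baseOffset, extra uint32) uint32 {
	return bo.baseLine + extra // caller guarantees extra < 1<<bo.addBits
}

func main() {
	// From initPredefined below: literal-length code 16 is {baseLine: 16, addBits: 1},
	// so it covers the two values 16 and 17.
	ll16 := baseOffset{baseLine: 16, addBits: 1}
	fmt.Println(decode(ll16, 0), decode(ll16, 1)) // 16 17
}
```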
-func fillBase(dst []baseOffset, base uint32, bits ...uint8) { - if len(bits) != len(dst) { - panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) - } - for i, bit := range bits { - if base > math.MaxInt32 { - panic("invalid decoding table, base overflows int32") - } - - dst[i] = baseOffset{ - baseLine: base, - addBits: bit, - } - base += 1 << bit - } -} - -var predef sync.Once - -func initPredefined() { - predef.Do(func() { - // Literals length codes - tmp := make([]baseOffset, 36) - for i := range tmp[:16] { - tmp[i] = baseOffset{ - baseLine: uint32(i), - addBits: 0, - } - } - fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableLiteralLengths] = tmp - - // Match length codes - tmp = make([]baseOffset, 53) - for i := range tmp[:32] { - tmp[i] = baseOffset{ - // The transformation adds the 3 length. - baseLine: uint32(i) + 3, - addBits: 0, - } - } - fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableMatchLengths] = tmp - - // Offset codes - tmp = make([]baseOffset, maxOffsetBits+1) - tmp[1] = baseOffset{ - baseLine: 1, - addBits: 1, - } - fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) - symbolTableX[tableOffsets] = tmp - - // Fill predefined tables and transform them. - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - for i := range fsePredef[:] { - f := &fsePredef[i] - switch tableIndex(i) { - case tableLiteralLengths: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 - f.actualTableLog = 6 - copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, - -1, -1, -1, -1}) - f.symbolLen = 36 - case tableOffsets: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 - f.actualTableLog = 5 - copy(f.norm[:], []int16{ - 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) - f.symbolLen = 29 - case tableMatchLengths: - //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 - f.actualTableLog = 6 - copy(f.norm[:], []int16{ - 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, - -1, -1, -1, -1, -1}) - f.symbolLen = 53 - } - if err := f.buildDtable(); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - if err := f.transform(symbolTableX[i]); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - f.preDefined = true - - // Create encoder as well - enc := &fsePredefEnc[i] - copy(enc.norm[:], f.norm[:]) - enc.symbolLen = f.symbolLen - enc.actualTableLog = f.actualTableLog - if err := enc.buildCTable(); err != nil { - panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) - } - enc.setBits(bitTables[i]) - enc.preDefined = true - } - }) -} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go deleted file mode 100644 index 5d73c21ebd..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ /dev/null @@ -1,35 +0,0 
@@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go deleted file mode 100644 index 09164856d2..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "github.com/klauspost/compress/huff0" -) - -// history contains the information transferred between blocks. -type history struct { - // Literal decompression - huffTree *huff0.Scratch - - // Sequence decompression - decoders sequenceDecs - recentOffsets [3]int - - // History buffer... - b []byte - - // ignoreBuffer is meant to ignore a number of bytes - // when checking for matches in history - ignoreBuffer int - - windowSize int - allocFrameBuffer int // needed? - error bool - dict *dict -} - -// reset will reset the history to initial state of a frame. -// The history must already have been initialized to the desired size. -func (h *history) reset() { - h.b = h.b[:0] - h.ignoreBuffer = 0 - h.error = false - h.recentOffsets = [3]int{1, 4, 8} - h.decoders.freeDecoders() - h.decoders = sequenceDecs{br: h.decoders.br} - h.freeHuffDecoder() - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) -} - -func (h *history) freeHuffDecoder() { - if h.huffTree != nil { - if h.dict == nil || h.dict.litEnc != h.huffTree { - huffDecoderPool.Put(h.huffTree) - h.huffTree = nil - } - } -} - -func (h *history) setDict(dict *dict) { - if dict == nil { - return - } - h.dict = dict - h.decoders.litLengths = dict.llDec - h.decoders.offsets = dict.ofDec - h.decoders.matchLengths = dict.mlDec - h.decoders.dict = dict.content - h.recentOffsets = dict.offsets - h.huffTree = dict.litEnc -} - -// append bytes to history. -// This function will make sure there is space for it, -// if the buffer has been allocated with enough extra space. -func (h *history) append(b []byte) { - if len(b) >= h.windowSize { - // Discard all history by simply overwriting - h.b = h.b[:h.windowSize] - copy(h.b, b[len(b)-h.windowSize:]) - return - } - - // If there is space, append it. - if len(b) < cap(h.b)-len(h.b) { - h.b = append(h.b, b...) 
- return - } - - // Move data down so we only have window size left. - // We know we have less than window size in b at this point. - discard := len(b) + len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] - copy(h.b[h.windowSize-len(b):], b) -} - -// ensureBlock will ensure there is space for at least one block... -func (h *history) ensureBlock() { - if cap(h.b) < h.allocFrameBuffer { - h.b = make([]byte, 0, h.allocFrameBuffer) - return - } - - avail := cap(h.b) - len(h.b) - if avail >= h.windowSize || avail > maxCompressedBlockSize { - return - } - // Move data down so we only have window size left. - // We know we have less than window size in b at this point. - discard := len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] -} - -// append bytes to history without ever discarding anything. -func (h *history) appendKeep(b []byte) { - h.b = append(h.b, b...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065f4..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md deleted file mode 100644 index 777290d44c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# xxhash - -VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - -xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -The package is written with optimized pure Go and also contains even faster -assembly implementations for amd64 and arm64. 
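A short usage sketch of the API listed above, written against the upstream module github.com/cespare/xxhash/v2 that this README points to, since the vendored copy lives in an internal package:

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))

	// Streaming via Digest, which implements hash.Hash64.
	d := xxhash.New()
	d.WriteString("hello ")
	d.Write([]byte("world"))
	fmt.Printf("%016x\n", d.Sum64())
}
```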
If desired, the `purego` build tag -opts into using the Go code even on those architectures. - -[xxHash]: http://cyan4973.github.io/xxHash/ - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| ---------- | --------- | --------- | -| 4 B | 1.3 GB/s | 1.2 GB/s | -| 16 B | 2.9 GB/s | 3.5 GB/s | -| 100 B | 6.9 GB/s | 8.1 GB/s | -| 4 KB | 11.7 GB/s | 16.7 GB/s | -| 10 MB | 12.0 GB/s | 17.3 GB/s | - -These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C -CPU using the following commands under Go 1.19.2: - -``` -benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') -benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) -- [FreeCache](https://github.com/coocood/freecache) -- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go deleted file mode 100644 index fc40c82001..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ /dev/null @@ -1,230 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. - -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// Store the primes in an array as well. -// -// The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. -var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - memleft := d.mem[d.n&(len(d.mem)-1):] - - if d.n+n < 32 { - // This new data doesn't even fill the current block. 
- copy(memleft, b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. - c := copy(memleft, b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[c:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - b := d.mem[:d.n&(len(d.mem)-1)] - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
-} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s deleted file mode 100644 index ddb63aa91b..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,210 +0,0 @@ -//go:build !appengine && gc && !purego && !noasm -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Registers: -#define h AX -#define d AX -#define p SI // pointer to advance through b -#define n DX -#define end BX // loop end -#define v1 R8 -#define v2 R9 -#define v3 R10 -#define v4 R11 -#define x R12 -#define prime1 R13 -#define prime2 R14 -#define prime4 DI - -#define round(acc, x) \ - IMULQ prime2, x \ - ADDQ x, acc \ - ROLQ $31, acc \ - IMULQ prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - IMULQ prime2, x \ - ROLQ $31, x \ - IMULQ prime1, x - -// mergeRound applies a merge round on the two registers acc and x. -// It assumes that prime1, prime2, and prime4 have been loaded. -#define mergeRound(acc, x) \ - round0(x) \ - XORQ x, acc \ - IMULQ prime1, acc \ - ADDQ prime4, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that there is at least one block -// to process. -#define blockLoop() \ -loop: \ - MOVQ +0(p), x \ - round(v1, x) \ - MOVQ +8(p), x \ - round(v2, x) \ - MOVQ +16(p), x \ - round(v3, x) \ - MOVQ +24(p), x \ - round(v4, x) \ - ADDQ $32, p \ - CMPQ p, end \ - JLE loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - // Load fixed primes. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - MOVQ ·primes+24(SB), prime4 - - // Load slice. - MOVQ b_base+0(FP), p - MOVQ b_len+8(FP), n - LEAQ (p)(n*1), end - - // The first loop limit will be len(b)-32. - SUBQ $32, end - - // Check whether we have at least one block. - CMPQ n, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). 
- MOVQ prime1, v1 - ADDQ prime2, v1 - MOVQ prime2, v2 - XORQ v3, v3 - XORQ v4, v4 - SUBQ prime1, v4 - - blockLoop() - - MOVQ v1, h - ROLQ $1, h - MOVQ v2, x - ROLQ $7, x - ADDQ x, h - MOVQ v3, x - ROLQ $12, x - ADDQ x, h - MOVQ v4, x - ROLQ $18, x - ADDQ x, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - - JMP afterBlocks - -noBlocks: - MOVQ ·primes+32(SB), h - -afterBlocks: - ADDQ n, h - - ADDQ $24, end - CMPQ p, end - JG try4 - -loop8: - MOVQ (p), x - ADDQ $8, p - round0(x) - XORQ x, h - ROLQ $27, h - IMULQ prime1, h - ADDQ prime4, h - - CMPQ p, end - JLE loop8 - -try4: - ADDQ $4, end - CMPQ p, end - JG try1 - - MOVL (p), x - ADDQ $4, p - IMULQ prime1, x - XORQ x, h - - ROLQ $23, h - IMULQ prime2, h - ADDQ ·primes+16(SB), h - -try1: - ADDQ $4, end - CMPQ p, end - JGE finalize - -loop1: - MOVBQZX (p), x - ADDQ $1, p - IMULQ ·primes+32(SB), x - XORQ x, h - ROLQ $11, h - IMULQ prime1, h - - CMPQ p, end - JL loop1 - -finalize: - MOVQ h, x - SHRQ $33, x - XORQ x, h - IMULQ prime2, h - MOVQ h, x - SHRQ $29, x - XORQ x, h - IMULQ ·primes+16(SB), h - MOVQ h, x - SHRQ $32, x - XORQ x, h - - MOVQ h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - // Load fixed primes needed for round. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - - // Load slice. - MOVQ b_base+8(FP), p - MOVQ b_len+16(FP), n - LEAQ (p)(n*1), end - SUBQ $32, end - - // Load vN from d. - MOVQ s+0(FP), d - MOVQ 0(d), v1 - MOVQ 8(d), v2 - MOVQ 16(d), v3 - MOVQ 24(d), v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. - blockLoop() - - // Copy vN back to d. - MOVQ v1, 0(d) - MOVQ v2, 8(d) - MOVQ v3, 16(d) - MOVQ v4, 24(d) - - // The number of bytes written is p minus the old base pointer. - SUBQ b_base+8(FP), p - MOVQ p, ret+32(FP) - - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s deleted file mode 100644 index 17901e0804..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ /dev/null @@ -1,184 +0,0 @@ -//go:build !appengine && gc && !purego && !noasm -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Registers: -#define digest R1 -#define h R2 // return value -#define p R3 // input pointer -#define n R4 // input length -#define nblocks R5 // n / 32 -#define prime1 R7 -#define prime2 R8 -#define prime3 R9 -#define prime4 R10 -#define prime5 R11 -#define v1 R12 -#define v2 R13 -#define v3 R14 -#define v4 R15 -#define x1 R20 -#define x2 R21 -#define x3 R22 -#define x4 R23 - -#define round(acc, x) \ - MADD prime2, acc, x, acc \ - ROR $64-31, acc \ - MUL prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - MUL prime2, x \ - ROR $64-31, x \ - MUL prime1, x - -#define mergeRound(acc, x) \ - round0(x) \ - EOR x, acc \ - MADD acc, prime4, prime1, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that n >= 32. 
-#define blockLoop() \ - LSR $5, n, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 16(p), (x1, x2) \ - LDP.P 16(p), (x3, x4) \ - round(v1, x1) \ - round(v2, x2) \ - round(v3, x3) \ - round(v4, x4) \ - SUB $1, nblocks \ - CBNZ nblocks, loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - LDP b_base+0(FP), (p, n) - - LDP ·primes+0(SB), (prime1, prime2) - LDP ·primes+16(SB), (prime3, prime4) - MOVD ·primes+32(SB), prime5 - - CMP $32, n - CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } - BLT afterLoop - - ADD prime1, prime2, v1 - MOVD prime2, v2 - MOVD $0, v3 - NEG prime1, v4 - - blockLoop() - - ROR $64-1, v1, x1 - ROR $64-7, v2, x2 - ADD x1, x2 - ROR $64-12, v3, x3 - ROR $64-18, v4, x4 - ADD x3, x4 - ADD x2, x4, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - -afterLoop: - ADD n, h - - TBZ $4, n, try8 - LDP.P 16(p), (x1, x2) - - round0(x1) - - // NOTE: here and below, sequencing the EOR after the ROR (using a - // rotated register) is worth a small but measurable speedup for small - // inputs. - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - - round0(x2) - ROR $64-27, h - EOR x2 @> 64-27, h, h - MADD h, prime4, prime1, h - -try8: - TBZ $3, n, try4 - MOVD.P 8(p), x1 - - round0(x1) - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - -try4: - TBZ $2, n, try2 - MOVWU.P 4(p), x2 - - MUL prime1, x2 - ROR $64-23, h - EOR x2 @> 64-23, h, h - MADD h, prime3, prime2, h - -try2: - TBZ $1, n, try1 - MOVHU.P 2(p), x3 - AND $255, x3, x1 - LSR $8, x3, x2 - - MUL prime5, x1 - ROR $64-11, h - EOR x1 @> 64-11, h, h - MUL prime1, h - - MUL prime5, x2 - ROR $64-11, h - EOR x2 @> 64-11, h, h - MUL prime1, h - -try1: - TBZ $0, n, finalize - MOVBU (p), x4 - - MUL prime5, x4 - ROR $64-11, h - EOR x4 @> 64-11, h, h - MUL prime1, h - -finalize: - EOR h >> 33, h - MUL prime2, h - EOR h >> 29, h - MUL prime3, h - EOR h >> 32, h - - MOVD h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - LDP ·primes+0(SB), (prime1, prime2) - - // Load state. Assume v[1-4] are stored contiguously. - MOVD d+0(FP), digest - LDP 0(digest), (v1, v2) - LDP 16(digest), (v3, v4) - - LDP b_base+8(FP), (p, n) - - blockLoop() - - // Store updated state. - STP (v1, v2), 0(digest) - STP (v3, v4), 16(digest) - - BIC $31, n - MOVD n, ret+32(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go deleted file mode 100644 index d4221edf4f..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm -// +build amd64 arm64 -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go deleted file mode 100644 index 0be16cefc7..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := primes[0] + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -primes[0] - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go deleted file mode 100644 index 6f3b0cb102..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go +++ /dev/null @@ -1,11 +0,0 @@ -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go deleted file mode 100644 index f41932b7a4..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. 
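The generic matchLen further below compares eight bytes at a time: XOR the two little-endian words, and the count of trailing zero bits divided by eight gives the number of equal leading bytes. A standalone sketch of that trick with illustrative values:

```
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	a := []byte("zstd-abc") // differs from b at index 5
	b := []byte("zstd-xyz")

	x := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
	if x == 0 {
		fmt.Println("all 8 bytes match")
		return
	}
	// Trailing zeros count the equal low-order bytes, which are the
	// leading bytes of the slice in little-endian order.
	fmt.Println("match length:", bits.TrailingZeros64(x)>>3) // 5
}
```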
- -package zstd - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s deleted file mode 100644 index 9a7655c0f7..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ /dev/null @@ -1,68 +0,0 @@ -// Copied from S2 implementation. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -// Requires: BMI -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SARQ $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go deleted file mode 100644 index 57b9c31c02..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "math/bits" -) - -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go deleted file mode 100644 index d7fe6d82d9..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "io" -) - -type seq struct { - litLen uint32 - matchLen uint32 - offset uint32 - - // Codes are stored here for the encoder - // so they only have to be looked up once. 
- llCode, mlCode, ofCode uint8 -} - -type seqVals struct { - ll, ml, mo int -} - -func (s seq) String() string { - if s.offset <= 3 { - if s.offset == 0 { - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") -} - -type seqCompMode uint8 - -const ( - compModePredefined seqCompMode = iota - compModeRLE - compModeFSE - compModeRepeat -) - -type sequenceDec struct { - // decoder keeps track of the current state and updates it from the bitstream. - fse *fseDecoder - state fseState - repeat bool -} - -// init the state of the decoder with input from stream. -func (s *sequenceDec) init(br *bitReader) error { - if s.fse == nil { - return errors.New("sequence decoder not defined") - } - s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog]) - return nil -} - -// sequenceDecs contains all 3 sequence decoders and their state. -type sequenceDecs struct { - litLengths sequenceDec - offsets sequenceDec - matchLengths sequenceDec - prevOffset [3]int - dict []byte - literals []byte - out []byte - nSeqs int - br *bitReader - seqSize int - windowSize int - maxBits uint8 - maxSyncLen uint64 -} - -// initialize all 3 decoders from the stream provided. -func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { - if err := s.litLengths.init(br); err != nil { - return errors.New("litLengths:" + err.Error()) - } - if err := s.offsets.init(br); err != nil { - return errors.New("offsets:" + err.Error()) - } - if err := s.matchLengths.init(br); err != nil { - return errors.New("matchLengths:" + err.Error()) - } - s.br = br - s.prevOffset = hist.recentOffsets - s.dict = nil - s.literals = nil - s.out = out - s.windowSize = hist.windowSize - if hist.dict != nil { - s.dict = hist.dict.content - } - return nil -} - -func (s *sequenceDecs) freeDecoders() { - if f := s.litLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - s.litLengths.fse = nil - } - if f := s.offsets.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - s.offsets.fse = nil - } - if f := s.matchLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - s.matchLengths.fse = nil - } -} - -// execute will execute the decoded sequences and write the output into dst. -// The sequence must be evaluated before being sent. -func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize > cap(s.out) { - addBytes := s.seqSize + len(s.out) - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - for _, seq := range seqs { - // Add literals - copy(out[t:], s.literals[:seq.ll]) - t += seq.ll - s.literals = s.literals[seq.ll:] - - // Copy from dictionary... - if seq.mo > t+len(hist) || seq.mo > s.windowSize { - if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) - } - - // we may be in dictionary. - dictO := len(s.dict) - (seq.mo - (t + len(hist))) - if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) - } - end := dictO + seq.ml - if end > len(s.dict) { - n := len(s.dict) - dictO - copy(out[t:], s.dict[dictO:]) - t += n - seq.ml -= n - } else { - copy(out[t:], s.dict[dictO:end]) - t += end - dictO - continue - } - } - - // Copy from history. - if v := seq.mo - t; v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if seq.ml > v { - // Some goes into current block. - // Copy remainder of history - copy(out[t:], hist[start:]) - t += v - seq.ml -= v - } else { - copy(out[t:], hist[start:start+seq.ml]) - t += seq.ml - continue - } - } - // We must be in current buffer now - if seq.ml > 0 { - start := t - seq.mo - if seq.ml <= t-start { - // No overlap - copy(out[t:], out[start:start+seq.ml]) - t += seq.ml - continue - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - src := out[start : start+seq.ml] - dst := out[t:] - dst = dst[:len(src)] - t += len(src) - // Destination is the space we just added. - for i := range src { - dst[i] = src[i] - } - } - } - } - - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} - -// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decodeSync(hist []byte) error { - supported, err := s.decodeSyncSimple(hist) - if supported { - return err - } - - br := s.br - seqs := s.nSeqs - startSize := len(s.out) - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - if debugDecoder { - println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") - } - for i := seqs - 1; i >= 0; i-- { - if br.overread() { - printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) - return io.ErrUnexpectedEOF - } - var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - - if ll > len(s.literals) { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) - } - size := ll + ml + len(out) - if size-startSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - if size > cap(out) { - // Not enough size, which can happen under high volume block streaming conditions - // but could be if destination slice is too small for sync operations. - // over-allocating here can create a large amount of GC pressure so we try to keep - // it as contained as possible - used := len(out) - startSize - addBytes := 256 + ll + ml + used>>2 - // Clamp to max block size. - if used+addBytes > maxBlockSize { - addBytes = maxBlockSize - used - } - out = append(out, make([]byte, addBytes)...) - out = out[:len(out)-addBytes] - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - - // Add literals - out = append(out, s.literals[:ll]...) 
-		s.literals = s.literals[ll:]
-
-		if mo == 0 && ml > 0 {
-			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
-		}
-
-		if mo > len(out)+len(hist) || mo > s.windowSize {
-			if len(s.dict) == 0 {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
-			}
-
-			// we may be in dictionary.
-			dictO := len(s.dict) - (mo - (len(out) + len(hist)))
-			if dictO < 0 || dictO >= len(s.dict) {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
-			}
-			end := dictO + ml
-			if end > len(s.dict) {
-				out = append(out, s.dict[dictO:]...)
-				ml -= len(s.dict) - dictO
-			} else {
-				out = append(out, s.dict[dictO:end]...)
-				mo = 0
-				ml = 0
-			}
-		}
-
-		// Copy from history.
-		// TODO: Blocks without history could be made to ignore this completely.
-		if v := mo - len(out); v > 0 {
-			// v is the start position in history from end.
-			start := len(hist) - v
-			if ml > v {
-				// Some goes into current block.
-				// Copy remainder of history
-				out = append(out, hist[start:]...)
-				ml -= v
-			} else {
-				out = append(out, hist[start:start+ml]...)
-				ml = 0
-			}
-		}
-		// We must be in current buffer now
-		if ml > 0 {
-			start := len(out) - mo
-			if ml <= len(out)-start {
-				// No overlap
-				out = append(out, out[start:start+ml]...)
-			} else {
-				// Overlapping copy
-				// Extend destination slice and copy one byte at a time.
-				out = out[:len(out)+ml]
-				src := out[start : start+ml]
-				// Destination is the space we just added.
-				dst := out[len(out)-ml:]
-				dst = dst[:len(src)]
-				for i := range src {
-					dst[i] = src[i]
-				}
-			}
-		}
-		if i == 0 {
-			// This is the last sequence, so we shouldn't update state.
-			break
-		}
-
-		// Manually inlined, ~ 5-20% faster
-		// Update all 3 states at once. Approx 20% faster.
-		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
-		if nBits == 0 {
-			llState = llTable[llState.newState()&maxTableMask]
-			mlState = mlTable[mlState.newState()&maxTableMask]
-			ofState = ofTable[ofState.newState()&maxTableMask]
-		} else {
-			bits := br.get32BitsFast(nBits)
-
-			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
-			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
-
-			lowBits = uint16(bits >> (ofState.nbBits() & 31))
-			lowBits &= bitMask[mlState.nbBits()&15]
-			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
-
-			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
-			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
-		}
-	}
-
-	if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
-		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
-	}
-
-	// Add final literals
-	s.out = append(out, s.literals...)
-	return br.close()
-}
-
-var bitMask [16]uint16
-
-func init() {
-	for i := range bitMask[:] {
-		bitMask[i] = uint16((1 << uint(i)) - 1)
-	}
-}
-
-func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
-	// Final will not read from stream.
-	ll, llB := llState.final()
-	ml, mlB := mlState.final()
-	mo, moB := ofState.final()
-
-	// extra bits are stored in reverse order.
- br.fill() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fill() - } - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) - mo = s.adjustOffset(mo, ll, moB) - return -} - -func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { - if offsetB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = offset - return offset - } - - if litLen == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - offset++ - } - - if offset == 0 { - return s.prevOffset[0] - } - var temp int - if offset == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[offset] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if offset != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - return temp -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go deleted file mode 100644 index 8adabd8287..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ /dev/null @@ -1,394 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" - "io" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -type decodeSyncAsmContext struct { - llTable []decSymbol - mlTable []decSymbol - ofTable []decSymbol - llState uint64 - mlState uint64 - ofState uint64 - iteration int - litRemain int - out []byte - outPosition int - literals []byte - litPosition int - history []byte - windowSize int - ll int // set on error (not for all errors, please refer to _generate/gen.go) - ml int // set on error (not for all errors, please refer to _generate/gen.go) - mo int // set on error (not for all errors, please refer to _generate/gen.go) -} - -// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. -// -//go:noescape -func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. -// -//go:noescape -func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// decode sequences from the stream with the provided history but without a dictionary. 
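The adjustOffset method above (and its inlined copy in decodeSync) implements the zstd repeat-offset rules: selector 0 reuses the most recent offset, selectors 1 and 2 pick older ones, the whole selection shifts by one when the sequence has no literals, and the shifted value 3 means "most recent offset minus one". A standalone sketch that mirrors that logic with a concrete trace; prevOffsets and adjust are hypothetical names for this illustration, and {1, 4, 8} are the format's initial repeat offsets:

```go
package main

import "fmt"

// prevOffsets mirrors sequenceDecs.prevOffset: most recent first.
type prevOffsets [3]int

// adjust applies the same rules as adjustOffset: offsetB > 1 means a
// brand-new offset, otherwise offset selects a history slot, shifted
// by one when litLen == 0, with (shifted) offset 3 meaning p[0]-1.
func (p *prevOffsets) adjust(offset, litLen int, offsetB uint8) int {
	if offsetB > 1 {
		p[2], p[1], p[0] = p[1], p[0], offset
		return offset
	}
	if litLen == 0 {
		offset++ // no literals: repeat offsets shift by one
	}
	if offset == 0 {
		return p[0]
	}
	var temp int
	if offset == 3 {
		temp = p[0] - 1
	} else {
		temp = p[offset]
	}
	if temp == 0 {
		temp = 1 // corrupted input; force a valid offset
	}
	if offset != 1 {
		p[2] = p[1]
	}
	p[1] = p[0]
	p[0] = temp
	return temp
}

func main() {
	p := prevOffsets{1, 4, 8}       // initial repeat offsets
	fmt.Println(p.adjust(42, 5, 2)) // new offset 42; history {42 1 4}
	fmt.Println(p.adjust(2, 7, 0))  // selector 2 picks 4; history {4 42 1}
	fmt.Println(p.adjust(0, 0, 0))  // litLen==0 shifts 0->1, picks 42
	fmt.Println(p)                  // [42 4 1]
}
```

Running it prints 42, 4 and 42, with the history rotating exactly as the three prevOffset assignments in the code above do.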
-func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
-	if len(s.dict) > 0 {
-		return false, nil
-	}
-	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
-		return false, nil
-	}
-
-	// FIXME: Using unsafe memory copies leads to rare, random crashes
-	// with fuzz testing. It is therefore disabled for now.
-	const useSafe = true
-	/*
-		useSafe := false
-		if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
-			useSafe = true
-		}
-		if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
-			useSafe = true
-		}
-		if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
-			useSafe = true
-		}
-	*/
-
-	br := s.br
-
-	maxBlockSize := maxCompressedBlockSize
-	if s.windowSize < maxBlockSize {
-		maxBlockSize = s.windowSize
-	}
-
-	ctx := decodeSyncAsmContext{
-		llTable:     s.litLengths.fse.dt[:maxTablesize],
-		mlTable:     s.matchLengths.fse.dt[:maxTablesize],
-		ofTable:     s.offsets.fse.dt[:maxTablesize],
-		llState:     uint64(s.litLengths.state.state),
-		mlState:     uint64(s.matchLengths.state.state),
-		ofState:     uint64(s.offsets.state.state),
-		iteration:   s.nSeqs - 1,
-		litRemain:   len(s.literals),
-		out:         s.out,
-		outPosition: len(s.out),
-		literals:    s.literals,
-		windowSize:  s.windowSize,
-		history:     hist,
-	}
-
-	s.seqSize = 0
-	startSize := len(s.out)
-
-	var errCode int
-	if cpuinfo.HasBMI2() {
-		if useSafe {
-			errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
-		} else {
-			errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
-		}
-	} else {
-		if useSafe {
-			errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
-		} else {
-			errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
-		}
-	}
-	switch errCode {
-	case noError:
-		break
-
-	case errorMatchLenOfsMismatch:
-		return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
-
-	case errorMatchLenTooBig:
-		return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
-
-	case errorMatchOffTooBig:
-		return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
-			ctx.mo, ctx.outPosition+len(hist)-startSize)
-
-	case errorNotEnoughLiterals:
-		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
-			ctx.ll, ctx.litRemain+ctx.ll)
-
-	case errorOverread:
-		return true, io.ErrUnexpectedEOF
-
-	case errorNotEnoughSpace:
-		size := ctx.outPosition + ctx.ll + ctx.ml
-		if debugDecoder {
-			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
-		}
-		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
-
-	default:
-		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
-	}
-
-	s.seqSize += ctx.litRemain
-	if s.seqSize > maxBlockSize {
-		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
-	}
-	err := br.close()
-	if err != nil {
-		printf("Closing sequences: %v, %+v\n", err, *br)
-		return true, err
-	}
-
-	s.literals = s.literals[ctx.litPosition:]
-	t := ctx.outPosition
-	s.out = s.out[:t]
-
-	// Add final literals
-	s.out = append(s.out, s.literals...)
-	if debugDecoder {
-		t += len(s.literals)
-		if t != len(s.out) {
-			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
-		}
-	}
-
-	return true, nil
-}
-
-// --------------------------------------------------------------------------------
-
-type decodeAsmContext struct {
-	llTable   []decSymbol
-	mlTable   []decSymbol
-	ofTable   []decSymbol
-	llState   uint64
-	mlState   uint64
-	ofState   uint64
-	iteration int
-	seqs      []seqVals
-	litRemain int
-}
-
-const noError = 0
-
-// error reported when mo == 0 && ml > 0
-const errorMatchLenOfsMismatch = 1
-
-// error reported when ml > maxMatchLen
-const errorMatchLenTooBig = 2
-
-// error reported when mo > available history or mo > s.windowSize
-const errorMatchOffTooBig = 3
-
-// error reported when the sum of literal lengths exceeds the literal buffer size
-const errorNotEnoughLiterals = 4
-
-// error reported when capacity of `out` is too small
-const errorNotEnoughSpace = 5
-
-// error reported when bits are overread.
-const errorOverread = 6
-
-// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
-//
-// Please refer to seqdec_generic.go for the reference implementation.
-//
-//go:noescape
-func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
-//
-// Please refer to seqdec_generic.go for the reference implementation.
-//
-//go:noescape
-func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
-//
-//go:noescape
-func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
-//
-//go:noescape
-func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// decode sequences from the stream without history.
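The decode method that follows picks between the plain and the `_56` assembly variants: when the worst-case bits one sequence can consume (s.maxBits of extra bits plus one table-log-sized read per FSE state reload) fits in 56 bits, a single refill of the 64-bit bit buffer per sequence is enough, since a byte-granular refill always leaves at least 56 usable bits. A sketch of that selection arithmetic; fitsIn56 and the sample values are illustrative, not from this package:

```go
package main

import "fmt"

// fitsIn56 mirrors the lte56bits condition in decode below: the sum of
// the maximum extra bits (maxBits) and the three FSE table logs must
// not exceed the 56 bits one byte-wise refill can guarantee.
func fitsIn56(maxBits, ofLog, mlLog, llLog uint8) bool {
	return int(maxBits)+int(ofLog)+int(mlLog)+int(llLog) <= 56
}

func main() {
	// Large-window offsets plus maximum table logs overflow the budget
	// and force the two-refill variant.
	fmt.Println(fitsIn56(33, 8, 9, 9)) // false (59 bits worst case)
	// Smaller tables leave slack for the single-refill variant.
	fmt.Println(fitsIn56(24, 5, 6, 6)) // true (41 bits worst case)
}
```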
-func (s *sequenceDecs) decode(seqs []seqVals) error {
-	br := s.br
-
-	maxBlockSize := maxCompressedBlockSize
-	if s.windowSize < maxBlockSize {
-		maxBlockSize = s.windowSize
-	}
-
-	ctx := decodeAsmContext{
-		llTable:   s.litLengths.fse.dt[:maxTablesize],
-		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
-		ofTable:   s.offsets.fse.dt[:maxTablesize],
-		llState:   uint64(s.litLengths.state.state),
-		mlState:   uint64(s.matchLengths.state.state),
-		ofState:   uint64(s.offsets.state.state),
-		seqs:      seqs,
-		iteration: len(seqs) - 1,
-		litRemain: len(s.literals),
-	}
-
-	if debugDecoder {
-		println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
-	}
-
-	s.seqSize = 0
-	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
-	var errCode int
-	if cpuinfo.HasBMI2() {
-		if lte56bits {
-			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
-		} else {
-			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
-		}
-	} else {
-		if lte56bits {
-			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
-		} else {
-			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
-		}
-	}
-	if errCode != 0 {
-		i := len(seqs) - ctx.iteration - 1
-		switch errCode {
-		case errorMatchLenOfsMismatch:
-			ml := ctx.seqs[i].ml
-			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
-
-		case errorMatchLenTooBig:
-			ml := ctx.seqs[i].ml
-			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
-
-		case errorNotEnoughLiterals:
-			ll := ctx.seqs[i].ll
-			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
-		case errorOverread:
-			return io.ErrUnexpectedEOF
-		}
-
-		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
-	}
-
-	if ctx.litRemain < 0 {
-		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
-			len(s.literals), len(s.literals)-ctx.litRemain)
-	}
-
-	s.seqSize += ctx.litRemain
-	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
-	}
-	if debugDecoder {
-		println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
-	}
-	err := br.close()
-	if err != nil {
-		printf("Closing sequences: %v, %+v\n", err, *br)
-	}
-	return err
-}
-
-// --------------------------------------------------------------------------------
-
-type executeAsmContext struct {
-	seqs        []seqVals
-	seqIndex    int
-	out         []byte
-	history     []byte
-	literals    []byte
-	outPosition int
-	litPosition int
-	windowSize  int
-}
-
-// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
-//
-// Returns false if a match offset is too big.
-//
-// Please refer to seqdec_generic.go for the reference implementation.
-//
-//go:noescape
-func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
-
-// Same as above, but with safe memcopies
-//
-//go:noescape
-func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
-
-// executeSimple handles cases when a dictionary is not used.
-func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
-	// Ensure we have enough output size...
-	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
-		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
-		s.out = append(s.out, make([]byte, addBytes)...)
- s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - ctx := executeAsmContext{ - seqs: seqs, - seqIndex: 0, - out: out, - history: hist, - outPosition: t, - litPosition: 0, - literals: s.literals, - windowSize: s.windowSize, - } - var ok bool - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - ok = sequenceDecs_executeSimple_safe_amd64(&ctx) - } else { - ok = sequenceDecs_executeSimple_amd64(&ctx) - } - if !ok { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", - seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) - } - s.literals = s.literals[ctx.litPosition:] - t = ctx.outPosition - - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s deleted file mode 100644 index 974b99725f..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ /dev/null @@ -1,4175 +0,0 @@ -// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_end - -sequenceDecs_decode_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_byte_by_byte - -sequenceDecs_decode_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_2_end - -sequenceDecs_decode_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte - -sequenceDecs_decode_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRQ $0x10, R9 - 
MOVWQZX R9, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_amd64_adjust_offset_nonzero - -sequenceDecs_decode_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_amd64_adjust_zero - JEQ sequenceDecs_decode_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_amd64_adjust_three - JMP sequenceDecs_decode_amd64_adjust_two - -sequenceDecs_decode_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 
104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_56_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decode_56_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_56_amd64_fill_end - -sequenceDecs_decode_56_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_56_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_56_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte - -sequenceDecs_decode_56_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_56_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_56_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ 
R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_56_amd64_after_adjust - -sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero - -sequenceDecs_decode_56_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_56_amd64_after_adjust - -sequenceDecs_decode_56_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_amd64_adjust_zero - JEQ sequenceDecs_decode_56_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_amd64_adjust_three - JMP sequenceDecs_decode_56_amd64_adjust_two - -sequenceDecs_decode_56_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_56_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_56_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_56_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_56_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_56_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_end - -sequenceDecs_decode_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_byte_by_byte - -sequenceDecs_decode_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_2_end - -sequenceDecs_decode_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte - -sequenceDecs_decode_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_bmi2_after_adjust - -sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero - MOVQ R10, 
CX - JMP sequenceDecs_decode_bmi2_after_adjust - -sequenceDecs_decode_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_bmi2_adjust_zero - JEQ sequenceDecs_decode_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_bmi2_adjust_three - JMP sequenceDecs_decode_bmi2_adjust_two - -sequenceDecs_decode_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_three: - LEAQ -1(R10), R13 - -sequenceDecs_decode_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_56_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_56_bmi2_fill_end - -sequenceDecs_decode_56_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_56_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_56_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte - -sequenceDecs_decode_56_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_56_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_56_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_bmi2_adjust_zero - JEQ sequenceDecs_decode_56_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_bmi2_adjust_three - JMP sequenceDecs_decode_56_bmi2_adjust_two - -sequenceDecs_decode_56_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_two: - MOVQ R12, R13 - JMP 
sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_three: - LEAQ -1(R10), R13 - -sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_56_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_56_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_56_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_56_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (SI)(R14*1), X0 - MOVUPS X0, (BX)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, R11 - JB copy_1 - ADDQ R11, SI - ADDQ R11, BX - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL 
-4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ BX, R12 - ADDQ R13, BX - -copy_2: - MOVUPS (R11), X0 - MOVUPS X0, (R12) - ADDQ $0x10, R11 - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - MOVQ R11, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (SI), X0 - MOVUPS X0, (BX) - ADDQ $0x10, SI - ADDQ $0x10, BX - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(SI)(R14*1), SI - LEAQ 16(BX)(R14*1), BX - MOVUPS -16(SI), X0 - MOVUPS X0, -16(BX) - JMP copy_1_end - -copy_1_small: - CMPQ R11, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ R11, $0x08 - JB 
copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (SI), R14 - MOVB -1(SI)(R11*1), R15 - MOVB R14, (BX) - MOVB R15, -1(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_3: - MOVW (SI), R14 - MOVB 2(SI), R15 - MOVW R14, (BX) - MOVB R15, 2(BX) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_4through7: - MOVL (SI), R14 - MOVL -4(SI)(R11*1), R15 - MOVL R14, (BX) - MOVL R15, -4(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (SI), R14 - MOVQ -8(SI)(R11*1), R15 - MOVQ R14, (BX) - MOVQ R15, -8(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - -copy_1_end: - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (R11), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R11 - ADDQ $0x10, BX - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(R11)(R12*1), R11 - LEAQ 16(BX)(R12*1), BX - MOVUPS -16(R11), X0 - MOVUPS 
X0, -16(BX) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (R11), R12 - MOVB -1(R11)(R13*1), R14 - MOVB R12, (BX) - MOVB R14, -1(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_3: - MOVW (R11), R12 - MOVB 2(R11), R14 - MOVW R12, (BX) - MOVB R14, 2(BX) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_4through7: - MOVL (R11), R12 - MOVL -4(R11)(R13*1), R14 - MOVL R12, (BX) - MOVL R14, -4(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (R11), R12 - MOVQ -8(R11)(R13*1), R14 - MOVQ R12, (BX) - MOVQ R14, -8(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_end - -sequenceDecs_decodeSync_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_2_end - -sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ 
ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R11)(R14*1), X0 - MOVUPS X0, (R10)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, AX - JB copy_1 - ADDQ AX, R11 - ADDQ AX, R10 - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL 
-4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R10, CX - ADDQ R13, R10 - -copy_2: - MOVUPS (AX), X0 - MOVUPS X0, (CX) - ADDQ $0x10, AX - ADDQ $0x10, CX - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT 
·sequenceDecs_decodeSync_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_end - -sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_2_end - -sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - 
BEXTRQ R13, DI, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R10)(R14*1), X0 - MOVUPS X0, (R9)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, CX - JB copy_1 - ADDQ CX, R10 - ADDQ CX, R9 - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 
- JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - -copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R9, R12 - ADDQ R13, R9 - -copy_2: - MOVUPS (CX), X0 - MOVUPS X0, (R12) - ADDQ $0x10, CX - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ 
SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_safe_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_end - -sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end - -sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ll_update_zero: - MOVQ AX, 
24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_safe_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_safe_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_safe_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - MOVQ AX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R11), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R11 - ADDQ $0x10, R10 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R11)(R14*1), R11 - LEAQ 16(R10)(R14*1), R10 - 
MOVUPS -16(R11), X0 - MOVUPS X0, -16(R10) - JMP copy_1_end - -copy_1_small: - CMPQ AX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ AX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (R11), R14 - MOVB -1(R11)(AX*1), R15 - MOVB R14, (R10) - MOVB R15, -1(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_3: - MOVW (R11), R14 - MOVB 2(R11), R15 - MOVW R14, (R10) - MOVB R15, 2(R10) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R11), R14 - MOVL -4(R11)(AX*1), R15 - MOVL R14, (R10) - MOVL R15, -4(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R11), R14 - MOVQ -8(R11)(AX*1), R15 - MOVQ R14, (R10) - MOVQ R15, -8(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - -copy_1_end: - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_2_small - -copy_2_loop: - MOVUPS (AX), X0 - MOVUPS X0, (R10) - ADDQ $0x10, AX - ADDQ 
$0x10, R10 - SUBQ $0x10, CX - JAE copy_2_loop - LEAQ 16(AX)(CX*1), AX - LEAQ 16(R10)(CX*1), R10 - MOVUPS -16(AX), X0 - MOVUPS X0, -16(R10) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (AX), CL - MOVB -1(AX)(R13*1), R14 - MOVB CL, (R10) - MOVB R14, -1(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_3: - MOVW (AX), CX - MOVB 2(AX), R14 - MOVW CX, (R10) - MOVB R14, 2(R10) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (AX), CX - MOVL -4(AX)(R13*1), R14 - MOVL CX, (R10) - MOVL R14, -4(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (AX), CX - MOVQ -8(AX)(R13*1), R14 - MOVQ CX, (R10) - MOVQ R14, -8(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_safe_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_safe_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_end - -sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end - -sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_safe_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_safe_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_safe_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - MOVQ CX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R10), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R10 - ADDQ $0x10, R9 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R10)(R14*1), R10 - LEAQ 16(R9)(R14*1), R9 - MOVUPS -16(R10), X0 - MOVUPS X0, -16(R9) - JMP copy_1_end - -copy_1_small: - CMPQ CX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ CX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (R10), R14 - MOVB -1(R10)(CX*1), R15 - MOVB R14, (R9) - MOVB R15, -1(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_3: - MOVW (R10), R14 - MOVB 2(R10), R15 - MOVW R14, (R9) - MOVB R15, 2(R9) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R10), R14 - MOVL -4(R10)(CX*1), R15 - MOVL R14, (R9) - MOVL R15, -4(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R10), R14 - MOVQ -8(R10)(CX*1), R15 - MOVQ R14, (R9) - MOVQ R15, -8(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - -copy_1_end: - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - 
-copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - -copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (CX), X0 - MOVUPS X0, (R9) - ADDQ $0x10, CX - ADDQ $0x10, R9 - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(CX)(R12*1), CX - LEAQ 16(R9)(R12*1), R9 - MOVUPS -16(CX), X0 - MOVUPS X0, -16(R9) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (CX), R12 - MOVB -1(CX)(R13*1), R14 - MOVB R12, (R9) - MOVB R14, -1(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_3: - MOVW (CX), R12 - MOVB 2(CX), R14 - MOVW R12, (R9) - MOVB R14, 2(R9) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (CX), R12 - MOVL -4(CX)(R13*1), R14 - MOVL R12, (R9) - MOVL R14, -4(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (CX), R12 - MOVQ -8(CX)(R13*1), R14 - MOVQ R12, (R9) - MOVQ R14, -8(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_safe_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - 
MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go deleted file mode 100644 index 2fb35b788c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ /dev/null @@ -1,237 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "fmt" - "io" -) - -// decode sequences from the stream with the provided history but without dictionary. -func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - return false, nil -} - -// decode sequences from the stream without the provided history. -func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - s.seqSize = 0 - litRemain := len(s.literals) - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - for i := range seqs { - var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
- mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - if br.overread() { - if debugDecoder { - printf("reading sequence %d, exceeded available data\n", i) - } - return io.ErrUnexpectedEOF - } - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - // Evaluate. - // We might be doing this async, so do it early. - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - s.seqSize += ll + ml - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - litRemain -= ll - if litRemain < 0 { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) - } - seqs[i] = seqVals{ - ll: ll, - ml: ml, - mo: mo, - } - if i == len(seqs)-1 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - s.seqSize += litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// executeSimple handles cases when a dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize > cap(s.out) { - addBytes := s.seqSize + len(s.out) - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - for _, seq := range seqs { - // Add literals - copy(out[t:], s.literals[:seq.ll]) - t += seq.ll - s.literals = s.literals[seq.ll:] - - // Malformed input - if seq.mo > t+len(hist) || seq.mo > s.windowSize { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) - } - - // Copy from history. - if v := seq.mo - t; v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if seq.ml > v { - // Some goes into the current block. 
- // Copy remainder of history - copy(out[t:], hist[start:]) - t += v - seq.ml -= v - } else { - copy(out[t:], hist[start:start+seq.ml]) - t += seq.ml - continue - } - } - - // We must be in the current buffer now - if seq.ml > 0 { - start := t - seq.mo - if seq.ml <= t-start { - // No overlap - copy(out[t:], out[start:start+seq.ml]) - t += seq.ml - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - src := out[start : start+seq.ml] - dst := out[t:] - dst = dst[:len(src)] - t += len(src) - // Destination is the space we just added. - for i := range src { - dst[i] = src[i] - } - } - } - } - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go deleted file mode 100644 index 8014174a77..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "math/bits" - -type seqCoders struct { - llEnc, ofEnc, mlEnc *fseEncoder - llPrev, ofPrev, mlPrev *fseEncoder -} - -// swap coders with another (block). -func (s *seqCoders) swap(other *seqCoders) { - *s, *other = *other, *s -} - -// setPrev will update the previous encoders to the actually used ones -// and make sure a fresh one is in the main slot. -func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { - compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { - // We used the new one, more current to history and reuse the previous history - if *current == used { - *prev, *current = *current, *prev - c := *current - p := *prev - c.reUsed = false - p.reUsed = true - return - } - if used == *prev { - return - } - // Ensure we cannot reuse by accident - prevEnc := *prev - prevEnc.symbolLen = 0 - } - compareSwap(ll, &s.llEnc, &s.llPrev) - compareSwap(ml, &s.mlEnc, &s.mlPrev) - compareSwap(of, &s.ofEnc, &s.ofPrev) -} - -func highBit(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} - -var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 16, 17, 17, 18, 18, 19, 19, - 20, 20, 20, 20, 21, 21, 21, 21, - 22, 22, 22, 22, 22, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, - 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24} - -// Up to 6 bits -const maxLLCode = 35 - -// llBitsTable translates from ll code to number of bits. -var llBitsTable = [maxLLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 16} - -// llCode returns the code that represents the literal length requested. 
-func llCode(litLength uint32) uint8 { - const llDeltaCode = 19 - if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) - return llCodeTable[litLength&63] - } - return uint8(highBit(litLength)) + llDeltaCode -} - -var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, - 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} - -// Up to 6 bits -const maxMLCode = 52 - -// mlBitsTable translates from ml code to number of bits. -var mlBitsTable = [maxMLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16} - -// note : mlBase = matchLength - MINMATCH; -// because it's the format it's stored in seqStore->sequences -func mlCode(mlBase uint32) uint8 { - const mlDeltaCode = 36 - if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) - return mlCodeTable[mlBase&127] - } - return uint8(highBit(mlBase)) + mlDeltaCode -} - -func ofCode(offset uint32) uint8 { - // A valid offset will always be > 0. - return uint8(bits.Len32(offset) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go deleted file mode 100644 index ec13594e89..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ /dev/null @@ -1,434 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "hash/crc32" - "io" - - "github.com/klauspost/compress/huff0" - snappy "github.com/klauspost/compress/internal/snapref" -) - -const ( - snappyTagLiteral = 0x00 - snappyTagCopy1 = 0x01 - snappyTagCopy2 = 0x02 - snappyTagCopy4 = 0x03 -) - -const ( - snappyChecksumSize = 4 - snappyMagicBody = "sNaPpY" - - // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - snappyMaxBlockSize = 65536 - - // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - snappyMaxEncodedLenOfMaxBlockSize = 76490 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var ( - // ErrSnappyCorrupt reports that the input is invalid. - ErrSnappyCorrupt = errors.New("snappy: corrupt input") - // ErrSnappyTooLarge reports that the uncompressed length is too large. 
- ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") - // ErrSnappyUnsupported reports that the input isn't supported. - ErrSnappyUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. -// Conversion is done by converting the stream directly from Snappy without intermediate -// full decoding. -// Therefore the compression ratio is much less than what can be done by a full decompression -// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without -// any errors being generated. -// No CRC value is being generated and not all CRC values of the Snappy stream are checked. -// However, it provides really fast recompression of Snappy streams. -// The converter can be reused to avoid allocations, even after errors. -type SnappyConverter struct { - r io.Reader - err error - buf []byte - block *blockEnc -} - -// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. -// If any error is detected on the Snappy stream it is returned. -// The number of bytes written is returned. -func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { - initPredefined() - r.err = nil - r.r = in - if r.block == nil { - r.block = &blockEnc{} - r.block.init() - } - r.block.initNewEncode() - if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { - r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) - } - r.block.litEnc.Reuse = huff0.ReusePolicyNone - var written int64 - var readHeader bool - { - header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) - - var n int - n, r.err = w.Write(header) - if r.err != nil { - return written, r.err - } - written += int64(n) - } - - for { - if !r.readFull(r.buf[:4], true) { - // Add empty last block - r.block.reset(nil) - r.block.last = true - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, err := w.Write(r.block.output) - if err != nil { - return written, err - } - written += int64(n) - - return written, r.err - } - chunkType := r.buf[0] - if !readHeader { - if chunkType != chunkTypeStreamIdentifier { - println("chunkType != chunkTypeStreamIdentifier", chunkType) - r.err = ErrSnappyCorrupt - return written, r.err - } - readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - println("chunkLen > len(r.buf)", chunkType) - r.err = ErrSnappyUnsupported - return written, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). 
- if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return written, r.err - } - //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[snappyChecksumSize:] - - n, hdr, err := snappyDecodedLen(buf) - if err != nil { - r.err = err - return written, r.err - } - buf = buf[hdr:] - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - r.block.pushOffsets() - if err := decodeSnappy(r.block, buf); err != nil { - r.err = err - return written, r.err - } - if r.block.size+r.block.extraLits != n { - printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) - r.err = ErrSnappyCorrupt - return written, r.err - } - err = r.block.encode(nil, false, false) - switch err { - case errIncompressible: - r.block.popOffsets() - r.block.reset(nil) - r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) - if err != nil { - return written, err - } - err = r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - case nil: - default: - return written, err - } - - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - case chunkTypeUncompressedData: - if debugEncoder { - println("Uncompressed, chunklen", chunkLen) - } - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - buf := r.buf[:snappyChecksumSize] - if !r.readFull(buf, false) { - return written, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - snappyChecksumSize - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.literals = r.block.literals[:n] - if !r.readFull(r.block.literals, false) { - return written, r.err - } - if snappyCRC(r.block.literals) != checksum { - println("literals crc mismatch") - r.err = ErrSnappyCorrupt - return written, r.err - } - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - - case chunkTypeStreamIdentifier: - if debugEncoder { - println("stream id", chunkLen, len(snappyMagicBody)) - } - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(snappyMagicBody) { - println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) - r.err = ErrSnappyCorrupt - return written, r.err - } - if !r.readFull(r.buf[:len(snappyMagicBody)], false) { - return written, r.err - } - for i := 0; i < len(snappyMagicBody); i++ { - if r.buf[i] != snappyMagicBody[i] { - println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) - r.err = ErrSnappyCorrupt - return written, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
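For reference while reading the chunk loop above: every snappy framing chunk starts with a 4-byte header, one type byte followed by a 24-bit little-endian length, and the type ranges (0x00 compressed, 0x01 uncompressed, 0xfe padding, 0xff stream identifier, 0x02-0x7f reserved unskippable, 0x80-0xfd reserved skippable) drive the switch. A small sketch of just the header decode; parseChunkHeader is an illustrative name, not part of the deleted file:

package main

import "fmt"

// parseChunkHeader decodes the 4-byte snappy framing chunk header read
// at the top of the conversion loop: one type byte, then a 24-bit
// little-endian chunk length.
func parseChunkHeader(hdr [4]byte) (chunkType byte, chunkLen int) {
	return hdr[0], int(hdr[1]) | int(hdr[2])<<8 | int(hdr[3])<<16
}

func main() {
	// Stream identifier chunk: type 0xff, length 6 (the "sNaPpY" magic body).
	t, n := parseChunkHeader([4]byte{0xff, 0x06, 0x00, 0x00})
	fmt.Printf("type=%#02x len=%d\n", t, n)
}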
- println("chunkType <= 0x7f") - r.err = ErrSnappyUnsupported - return written, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return written, r.err - } - } -} - -// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read. -func decodeSnappy(blk *blockEnc, src []byte) error { - //decodeRef(make([]byte, snappyMaxBlockSize), src) - var s, length int - lits := blk.extraLits - var offset uint32 - for s < len(src) { - switch src[s] & 0x03 { - case snappyTagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - if x > snappyMaxBlockSize { - println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) - return ErrSnappyCorrupt - } - length = int(x) + 1 - if length <= 0 { - println("length <= 0 ", length) - - return errUnsupportedLiteralLength - } - //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { - // return ErrSnappyCorrupt - //} - - blk.literals = append(blk.literals, src[s:s+length]...) - //println(length, "litLen") - lits += length - s += length - continue - - case snappyTagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) - - case snappyTagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = uint32(src[s-2]) | uint32(src[s-1])<<8 - - case snappyTagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - - if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { - println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) - - return ErrSnappyCorrupt - } - - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
- if false { - offset = blk.matchOffset(offset, uint32(lits)) - } else { - offset += 3 - } - - blk.sequences = append(blk.sequences, seq{ - litLen: uint32(lits), - offset: offset, - matchLen: uint32(length) - zstdMinMatch, - }) - blk.size += length + lits - lits = 0 - } - blk.extraLits = lits - return nil -} - -func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrSnappyCorrupt - } - return false - } - return true -} - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func snappyCRC(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return c>>15 | c<<17 + 0xa282ead8 -} - -// snappyDecodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrSnappyCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrSnappyTooLarge - } - return int(v), n, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go deleted file mode 100644 index 29c15c8c4e..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "errors" - "io" - "sync" -) - -// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. -// See https://www.winzip.com/win/en/comp_info.html -const ZipMethodWinZip = 93 - -// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. -// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. -// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT -const ZipMethodPKWare = 20 - -// zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { - z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) - if err != nil { - panic(err) - } - return z -}} - -// newZipReader creates a pooled zip decompressor. -func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { - pool := &zipReaderPool - if len(opts) > 0 { - opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) - // Force concurrency 1 - opts = append(opts, WithDecoderConcurrency(1)) - // Create our own pool - pool = &sync.Pool{} - } - return func(r io.Reader) io.ReadCloser { - dec, ok := pool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, opts...) 
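The snappyCRC helper above implements the masked checksum from section 3 of the snappy framing format. Note that in Go, | and + share precedence and associate left, so c>>15 | c<<17 + 0xa282ead8 parses as ((c>>15)|(c<<17)) + 0xa282ead8: a 15-bit right rotation followed by the magic addend. A standalone sketch with the parentheses made explicit; maskedCRC is an illustrative name:

package main

import (
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// maskedCRC reproduces the deleted snappyCRC above: Castagnoli CRC-32,
// rotated right by 15 bits, plus the magic constant 0xa282ead8.
func maskedCRC(b []byte) uint32 {
	c := crc32.Update(0, castagnoli, b)
	return (c>>15 | c<<17) + 0xa282ead8
}

func main() {
	fmt.Printf("%#08x\n", maskedCRC([]byte("snappy")))
}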
- if err != nil { - panic(err) - } - dec = d - } - return &pooledZipReader{dec: dec, pool: pool} - } -} - -type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - pool *sync.Pool - dec *Decoder -} - -func (r *pooledZipReader) Read(p []byte) (n int, err error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.dec == nil { - return 0, errors.New("read after close or EOF") - } - dec, err := r.dec.Read(p) - if err == io.EOF { - r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return dec, err -} - -func (r *pooledZipReader) Close() error { - r.mu.Lock() - defer r.mu.Unlock() - var err error - if r.dec != nil { - err = r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return err -} - -type pooledZipWriter struct { - mu sync.Mutex // guards Close and Read - enc *Encoder - pool *sync.Pool -} - -func (w *pooledZipWriter) Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - if w.enc == nil { - return 0, errors.New("Write after Close") - } - return w.enc.Write(p) -} - -func (w *pooledZipWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - var err error - if w.enc != nil { - err = w.enc.Close() - w.pool.Put(w.enc) - w.enc = nil - } - return err -} - -// ZipCompressor returns a compressor that can be registered with zip libraries. -// The provided encoder options will be used on all encodes. -func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { - var pool sync.Pool - return func(w io.Writer) (io.WriteCloser, error) { - enc, ok := pool.Get().(*Encoder) - if ok { - enc.Reset(w) - } else { - var err error - enc, err = NewWriter(w, opts...) - if err != nil { - return nil, err - } - } - return &pooledZipWriter{enc: enc, pool: &pool}, nil - } -} - -// ZipDecompressor returns a decompressor that can be registered with zip libraries. -// See ZipCompressor for example. -// Options can be specified. WithDecoderConcurrency(1) is forced, -// and by default a 128MB maximum decompression window is specified. -// The window size can be overridden if required. -func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { - return newZipReader(opts...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go deleted file mode 100644 index 4be7cc7367..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ /dev/null @@ -1,121 +0,0 @@ -// Package zstd provides decompression of zstandard files. -// -// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "log" - "math" -) - -// enable debug printing -const debug = false - -// enable encoding debug printing -const debugEncoder = debug - -// enable decoding debug printing -const debugDecoder = debug - -// Enable extra assertions. -const debugAsserts = debug || false - -// print sequence details -const debugSequences = false - -// print detailed matching information -const debugMatches = false - -// force encoder to use predefined tables. -const forcePreDef = false - -// zstdMinMatch is the minimum zstd match length. -const zstdMinMatch = 3 - -// fcsUnknown is used for unknown frame content size. -const fcsUnknown = math.MaxUint64 - -var ( - // ErrReservedBlockType is returned when a reserved block type is found. - // Typically this indicates wrong or corrupted input. 
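The zip helpers deleted just above are shaped to plug straight into the standard library's archive/zip registration hooks. A usage sketch, assuming the upstream import path github.com/klauspost/compress/zstd (this repo vendors it under the github.com mirror):

package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Write one entry compressed with Zstandard (WinZip method 93).
	zw := zip.NewWriter(&buf)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
	w, err := zw.CreateHeader(&zip.FileHeader{Name: "hello.txt", Method: zstd.ZipMethodWinZip})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.WriteString(w, "hello, zstd-in-zip"); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Read it back; per the ZipDecompressor docs above, the decoder is
	// forced to concurrency 1 with a 128MB default maximum window.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
	rc, err := zr.File[0].Open()
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	data, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
}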
- ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") - - // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. - // Typically this indicates wrong or corrupted input. - ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") - - // ErrBlockTooSmall is returned when a block is too small to be decoded. - // Typically returned on invalid input. - ErrBlockTooSmall = errors.New("block too small") - - // ErrUnexpectedBlockSize is returned when a block has unexpected size. - // Typically returned on invalid input. - ErrUnexpectedBlockSize = errors.New("unexpected block size") - - // ErrMagicMismatch is returned when a "magic" number isn't what is expected. - // Typically this indicates wrong or corrupted input. - ErrMagicMismatch = errors.New("invalid input: magic number mismatch") - - // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeExceeded = errors.New("window size exceeded") - - // ErrWindowSizeTooSmall is returned when no window size is specified. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") - - // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. - ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") - - // ErrUnknownDictionary is returned if the dictionary ID is unknown. - ErrUnknownDictionary = errors.New("unknown dictionary") - - // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeExceeded = errors.New("frame size exceeded") - - // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") - - // ErrCRCMismatch is returned if CRC mismatches. - ErrCRCMismatch = errors.New("CRC check failed") - - // ErrDecoderClosed will be returned if the Decoder was used after - // Close has been called. - ErrDecoderClosed = errors.New("decoder used after Close") - - // ErrDecoderNilInput is returned when a nil Reader was provided - // and an operation other than Reset/DecodeAll/Close was attempted. - ErrDecoderNilInput = errors.New("nil input provided as reader") -) - -func println(a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Println(a...) - } -} - -func printf(format string, a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Printf(format, a...) - } -} - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) -} - -type byter interface { - Bytes() []byte - Len() int -} - -var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/letsencrypt/boulder/LICENSE.txt b/vendor/github.com/letsencrypt/boulder/LICENSE.txt deleted file mode 100644 index fa274d92d7..0000000000 --- a/vendor/github.com/letsencrypt/boulder/LICENSE.txt +++ /dev/null @@ -1,375 +0,0 @@ -Copyright 2016 ISRG. All rights reserved. - -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. 
"Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. 
Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. 
Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. 
However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. 
* -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/letsencrypt/boulder/core/challenges.go b/vendor/github.com/letsencrypt/boulder/core/challenges.go deleted file mode 100644 index 1d7e2408d5..0000000000 --- a/vendor/github.com/letsencrypt/boulder/core/challenges.go +++ /dev/null @@ -1,45 +0,0 @@ -package core - -import "fmt" - -func newChallenge(challengeType AcmeChallenge, token string) Challenge { - return Challenge{ - Type: challengeType, - Status: StatusPending, - Token: token, - } -} - -// HTTPChallenge01 constructs a random http-01 challenge. If token is empty a random token -// will be generated, otherwise the provided token is used. -func HTTPChallenge01(token string) Challenge { - return newChallenge(ChallengeTypeHTTP01, token) -} - -// DNSChallenge01 constructs a random dns-01 challenge. If token is empty a random token -// will be generated, otherwise the provided token is used. -func DNSChallenge01(token string) Challenge { - return newChallenge(ChallengeTypeDNS01, token) -} - -// TLSALPNChallenge01 constructs a random tls-alpn-01 challenge. If token is empty a random token -// will be generated, otherwise the provided token is used. -func TLSALPNChallenge01(token string) Challenge { - return newChallenge(ChallengeTypeTLSALPN01, token) -} - -// NewChallenge constructs a random challenge of the given kind. It returns an -// error if the challenge type is unrecognized. If token is empty a random token -// will be generated, otherwise the provided token is used. -func NewChallenge(kind AcmeChallenge, token string) (Challenge, error) { - switch kind { - case ChallengeTypeHTTP01: - return HTTPChallenge01(token), nil - case ChallengeTypeDNS01: - return DNSChallenge01(token), nil - case ChallengeTypeTLSALPN01: - return TLSALPNChallenge01(token), nil - default: - return Challenge{}, fmt.Errorf("unrecognized challenge type %q", kind) - } -} diff --git a/vendor/github.com/letsencrypt/boulder/core/interfaces.go b/vendor/github.com/letsencrypt/boulder/core/interfaces.go deleted file mode 100644 index 003329c3f5..0000000000 --- a/vendor/github.com/letsencrypt/boulder/core/interfaces.go +++ /dev/null @@ -1,14 +0,0 @@ -package core - -import ( - "github.com/letsencrypt/boulder/identifier" -) - -// PolicyAuthority defines the public interface for the Boulder PA -// TODO(#5891): Move this interface to a more appropriate location. 
-type PolicyAuthority interface { - WillingToIssueWildcards([]identifier.ACMEIdentifier) error - ChallengesFor(identifier.ACMEIdentifier) ([]Challenge, error) - ChallengeTypeEnabled(AcmeChallenge) bool - CheckAuthz(*Authorization) error -} diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go deleted file mode 100644 index b52f0f5e0a..0000000000 --- a/vendor/github.com/letsencrypt/boulder/core/objects.go +++ /dev/null @@ -1,521 +0,0 @@ -package core - -import ( - "crypto" - "encoding/base64" - "encoding/json" - "fmt" - "hash/fnv" - "net" - "strings" - "time" - - "golang.org/x/crypto/ocsp" - "gopkg.in/go-jose/go-jose.v2" - - "github.com/letsencrypt/boulder/identifier" - "github.com/letsencrypt/boulder/probs" - "github.com/letsencrypt/boulder/revocation" -) - -// AcmeStatus defines the state of a given authorization -type AcmeStatus string - -// These statuses are the states of authorizations, challenges, and registrations -const ( - StatusUnknown = AcmeStatus("unknown") // Unknown status; the default - StatusPending = AcmeStatus("pending") // In process; client has next action - StatusProcessing = AcmeStatus("processing") // In process; server has next action - StatusReady = AcmeStatus("ready") // Order is ready for finalization - StatusValid = AcmeStatus("valid") // Object is valid - StatusInvalid = AcmeStatus("invalid") // Validation failed - StatusRevoked = AcmeStatus("revoked") // Object no longer valid - StatusDeactivated = AcmeStatus("deactivated") // Object has been deactivated -) - -// AcmeResource values identify different types of ACME resources -type AcmeResource string - -// The types of ACME resources -const ( - ResourceNewReg = AcmeResource("new-reg") - ResourceNewAuthz = AcmeResource("new-authz") - ResourceNewCert = AcmeResource("new-cert") - ResourceRevokeCert = AcmeResource("revoke-cert") - ResourceRegistration = AcmeResource("reg") - ResourceChallenge = AcmeResource("challenge") - ResourceAuthz = AcmeResource("authz") - ResourceKeyChange = AcmeResource("key-change") -) - -// AcmeChallenge values identify different types of ACME challenges -type AcmeChallenge string - -// These types are the available challenges -const ( - ChallengeTypeHTTP01 = AcmeChallenge("http-01") - ChallengeTypeDNS01 = AcmeChallenge("dns-01") - ChallengeTypeTLSALPN01 = AcmeChallenge("tls-alpn-01") -) - -// IsValid tests whether the challenge is a known challenge -func (c AcmeChallenge) IsValid() bool { - switch c { - case ChallengeTypeHTTP01, ChallengeTypeDNS01, ChallengeTypeTLSALPN01: - return true - default: - return false - } -} - -// OCSPStatus defines the state of OCSP for a domain -type OCSPStatus string - -// These status are the states of OCSP -const ( - OCSPStatusGood = OCSPStatus("good") - OCSPStatusRevoked = OCSPStatus("revoked") - // Not a real OCSP status. This is a placeholder we write before the - // actual precertificate is issued, to ensure we never return "good" before - // issuance succeeds, for BR compliance reasons. - OCSPStatusNotReady = OCSPStatus("wait") -) - -var OCSPStatusToInt = map[OCSPStatus]int{ - OCSPStatusGood: ocsp.Good, - OCSPStatusRevoked: ocsp.Revoked, - OCSPStatusNotReady: -1, -} - -// DNSPrefix is attached to DNS names in DNS challenges -const DNSPrefix = "_acme-challenge" - -type RawCertificateRequest struct { - CSR JSONBuffer `json:"csr"` // The encoded CSR -} - -// Registration objects represent non-public metadata attached -// to account keys. 
-type Registration struct { - // Unique identifier - ID int64 `json:"id,omitempty" db:"id"` - - // Account key to which the details are attached - Key *jose.JSONWebKey `json:"key"` - - // Contact URIs - Contact *[]string `json:"contact,omitempty"` - - // Agreement with terms of service - Agreement string `json:"agreement,omitempty"` - - // InitialIP is the IP address from which the registration was created - InitialIP net.IP `json:"initialIp"` - - // CreatedAt is the time the registration was created. - CreatedAt *time.Time `json:"createdAt,omitempty"` - - Status AcmeStatus `json:"status"` -} - -// ValidationRecord represents a validation attempt against a specific URL/hostname -// and the IP addresses that were resolved and used -type ValidationRecord struct { - // SimpleHTTP only - URL string `json:"url,omitempty"` - - // Shared - Hostname string `json:"hostname,omitempty"` - Port string `json:"port,omitempty"` - AddressesResolved []net.IP `json:"addressesResolved,omitempty"` - AddressUsed net.IP `json:"addressUsed,omitempty"` - // AddressesTried contains a list of addresses tried before the `AddressUsed`. - // Presently this will only ever be one IP from `AddressesResolved` since the - // only retry is in the case of a v6 failure with one v4 fallback. E.g. if - // a record with `AddressesResolved: { 127.0.0.1, ::1 }` were processed for - // a challenge validation with the IPv6 first flag on and the ::1 address - // failed but the 127.0.0.1 retry succeeded then the record would end up - // being: - // { - // ... - // AddressesResolved: [ 127.0.0.1, ::1 ], - // AddressUsed: 127.0.0.1 - // AddressesTried: [ ::1 ], - // ... - // } - AddressesTried []net.IP `json:"addressesTried,omitempty"` -} - -func looksLikeKeyAuthorization(str string) error { - parts := strings.Split(str, ".") - if len(parts) != 2 { - return fmt.Errorf("Invalid key authorization: does not look like a key authorization") - } else if !LooksLikeAToken(parts[0]) { - return fmt.Errorf("Invalid key authorization: malformed token") - } else if !LooksLikeAToken(parts[1]) { - // Thumbprints have the same syntax as tokens in boulder - // Both are base64-encoded and 32 octets - return fmt.Errorf("Invalid key authorization: malformed key thumbprint") - } - return nil -} - -// Challenge is an aggregate of all data needed for any challenges. -// -// Rather than define individual types for different types of -// challenge, we just throw all the elements into one bucket, -// together with the common metadata elements. -type Challenge struct { - // The type of challenge - Type AcmeChallenge `json:"type"` - - // The status of this challenge - Status AcmeStatus `json:"status,omitempty"` - - // Contains the error that occurred during challenge validation, if any - Error *probs.ProblemDetails `json:"error,omitempty"` - - // A URI to which a response can be POSTed - URI string `json:"uri,omitempty"` - - // For the V2 API the "URI" field is deprecated in favour of URL. - URL string `json:"url,omitempty"` - - // Used by http-01, tls-sni-01, tls-alpn-01 and dns-01 challenges - Token string `json:"token,omitempty"` - - // The expected KeyAuthorization for validation of the challenge. Populated by - // the RA prior to passing the challenge to the VA. For legacy reasons this - // field is called "ProvidedKeyAuthorization" because it was initially set by - // the content of the challenge update POST from the client. It is no longer - // set that way and should be renamed to "KeyAuthorization". 
- // TODO(@cpu): Rename `ProvidedKeyAuthorization` to `KeyAuthorization`. - ProvidedKeyAuthorization string `json:"keyAuthorization,omitempty"` - - // Contains information about URLs used or redirected to and IPs resolved and - // used - ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"` - // The time at which the server validated the challenge. Required by - // RFC8555 if status is valid. - Validated *time.Time `json:"validated,omitempty"` -} - -// ExpectedKeyAuthorization computes the expected KeyAuthorization value for -// the challenge. -func (ch Challenge) ExpectedKeyAuthorization(key *jose.JSONWebKey) (string, error) { - if key == nil { - return "", fmt.Errorf("Cannot authorize a nil key") - } - - thumbprint, err := key.Thumbprint(crypto.SHA256) - if err != nil { - return "", err - } - - return ch.Token + "." + base64.RawURLEncoding.EncodeToString(thumbprint), nil -} - -// RecordsSane checks the sanity of a ValidationRecord object before sending it -// back to the RA to be stored. -func (ch Challenge) RecordsSane() bool { - if ch.ValidationRecord == nil || len(ch.ValidationRecord) == 0 { - return false - } - - switch ch.Type { - case ChallengeTypeHTTP01: - for _, rec := range ch.ValidationRecord { - if rec.URL == "" || rec.Hostname == "" || rec.Port == "" || rec.AddressUsed == nil || - len(rec.AddressesResolved) == 0 { - return false - } - } - case ChallengeTypeTLSALPN01: - if len(ch.ValidationRecord) > 1 { - return false - } - if ch.ValidationRecord[0].URL != "" { - return false - } - if ch.ValidationRecord[0].Hostname == "" || ch.ValidationRecord[0].Port == "" || - ch.ValidationRecord[0].AddressUsed == nil || len(ch.ValidationRecord[0].AddressesResolved) == 0 { - return false - } - case ChallengeTypeDNS01: - if len(ch.ValidationRecord) > 1 { - return false - } - if ch.ValidationRecord[0].Hostname == "" { - return false - } - return true - default: // Unsupported challenge type - return false - } - - return true -} - -// CheckConsistencyForClientOffer checks the fields of a challenge object before it is -// given to the client. -func (ch Challenge) CheckConsistencyForClientOffer() error { - err := ch.checkConsistency() - if err != nil { - return err - } - - // Before completion, the key authorization field should be empty - if ch.ProvidedKeyAuthorization != "" { - return fmt.Errorf("A response to this challenge was already submitted.") - } - return nil -} - -// CheckConsistencyForValidation checks the fields of a challenge object before it is -// given to the VA. -func (ch Challenge) CheckConsistencyForValidation() error { - err := ch.checkConsistency() - if err != nil { - return err - } - - // If the challenge is completed, then there should be a key authorization - return looksLikeKeyAuthorization(ch.ProvidedKeyAuthorization) -} - -// checkConsistency checks the sanity of a challenge object before issued to the client. -func (ch Challenge) checkConsistency() error { - if ch.Status != StatusPending { - return fmt.Errorf("The challenge is not pending.") - } - - // There always needs to be a token - if !LooksLikeAToken(ch.Token) { - return fmt.Errorf("The token is missing.") - } - return nil -} - -// StringID is used to generate a ID for challenges associated with new style authorizations. -// This is necessary as these challenges no longer have a unique non-sequential identifier -// in the new storage scheme. 
This identifier is generated by constructing a fnv hash over the -// challenge token and type and encoding the first 4 bytes of it using the base64 URL encoding. -func (ch Challenge) StringID() string { - h := fnv.New128a() - h.Write([]byte(ch.Token)) - h.Write([]byte(ch.Type)) - return base64.RawURLEncoding.EncodeToString(h.Sum(nil)[0:4]) -} - -// Authorization represents the authorization of an account key holder -// to act on behalf of a domain. This struct is intended to be used both -// internally and for JSON marshaling on the wire. Any fields that should be -// suppressed on the wire (e.g., ID, regID) must be made empty before marshaling. -type Authorization struct { - // An identifier for this authorization, unique across - // authorizations and certificates within this instance. - ID string `json:"id,omitempty" db:"id"` - - // The identifier for which authorization is being given - Identifier identifier.ACMEIdentifier `json:"identifier,omitempty" db:"identifier"` - - // The registration ID associated with the authorization - RegistrationID int64 `json:"regId,omitempty" db:"registrationID"` - - // The status of the validation of this authorization - Status AcmeStatus `json:"status,omitempty" db:"status"` - - // The date after which this authorization will be no - // longer be considered valid. Note: a certificate may be issued even on the - // last day of an authorization's lifetime. The last day for which someone can - // hold a valid certificate based on an authorization is authorization - // lifetime + certificate lifetime. - Expires *time.Time `json:"expires,omitempty" db:"expires"` - - // An array of challenges objects used to validate the - // applicant's control of the identifier. For authorizations - // in process, these are challenges to be fulfilled; for - // final authorizations, they describe the evidence that - // the server used in support of granting the authorization. - // - // There should only ever be one challenge of each type in this - // slice and the order of these challenges may not be predictable. - Challenges []Challenge `json:"challenges,omitempty" db:"-"` - - // https://datatracker.ietf.org/doc/html/rfc8555#page-29 - // - // wildcard (optional, boolean): This field MUST be present and true - // for authorizations created as a result of a newOrder request - // containing a DNS identifier with a value that was a wildcard - // domain name. For other authorizations, it MUST be absent. - // Wildcard domain names are described in Section 7.1.3. - // - // This is not represented in the database because we calculate it from - // the identifier stored in the database. Unlike the identifier returned - // as part of the authorization, the identifier we store in the database - // can contain an asterisk. - Wildcard bool `json:"wildcard,omitempty" db:"-"` -} - -// FindChallengeByStringID will look for a challenge matching the given ID inside -// this authorization. If found, it will return the index of that challenge within -// the Authorization's Challenges array. Otherwise it will return -1. -func (authz *Authorization) FindChallengeByStringID(id string) int { - for i, c := range authz.Challenges { - if c.StringID() == id { - return i - } - } - return -1 -} - -// SolvedBy will look through the Authorizations challenges, returning the type -// of the *first* challenge it finds with Status: valid, or an error if no -// challenge is valid. 
-func (authz *Authorization) SolvedBy() (AcmeChallenge, error) { - if len(authz.Challenges) == 0 { - return "", fmt.Errorf("Authorization has no challenges") - } - for _, chal := range authz.Challenges { - if chal.Status == StatusValid { - return chal.Type, nil - } - } - return "", fmt.Errorf("Authorization not solved by any challenge") -} - -// JSONBuffer fields get encoded and decoded JOSE-style, in base64url encoding -// with stripped padding. -type JSONBuffer []byte - -// MarshalJSON encodes a JSONBuffer for transmission. -func (jb JSONBuffer) MarshalJSON() (result []byte, err error) { - return json.Marshal(base64.RawURLEncoding.EncodeToString(jb)) -} - -// UnmarshalJSON decodes a JSONBuffer to an object. -func (jb *JSONBuffer) UnmarshalJSON(data []byte) (err error) { - var str string - err = json.Unmarshal(data, &str) - if err != nil { - return err - } - *jb, err = base64.RawURLEncoding.DecodeString(strings.TrimRight(str, "=")) - return -} - -// Certificate objects are entirely internal to the server. The only -// thing exposed on the wire is the certificate itself. -type Certificate struct { - ID int64 `db:"id"` - RegistrationID int64 `db:"registrationID"` - - Serial string `db:"serial"` - Digest string `db:"digest"` - DER []byte `db:"der"` - Issued time.Time `db:"issued"` - Expires time.Time `db:"expires"` -} - -// CertificateStatus structs are internal to the server. They represent the -// latest data about the status of the certificate, required for generating new -// OCSP responses and determining if a certificate has been revoked. -type CertificateStatus struct { - ID int64 `db:"id"` - - Serial string `db:"serial"` - - // status: 'good' or 'revoked'. Note that good, expired certificates remain - // with status 'good' but don't necessarily get fresh OCSP responses. - Status OCSPStatus `db:"status"` - - // ocspLastUpdated: The date and time of the last time we generated an OCSP - // response. If we have never generated one, this has the zero value of - // time.Time, i.e. Jan 1 1970. - OCSPLastUpdated time.Time `db:"ocspLastUpdated"` - - // revokedDate: If status is 'revoked', this is the date and time it was - // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970. - RevokedDate time.Time `db:"revokedDate"` - - // revokedReason: If status is 'revoked', this is the reason code for the - // revocation. Otherwise it is zero (which happens to be the reason - // code for 'unspecified'). - RevokedReason revocation.Reason `db:"revokedReason"` - - LastExpirationNagSent time.Time `db:"lastExpirationNagSent"` - - // NotAfter and IsExpired are convenience columns which allow expensive - // queries to quickly filter out certificates that we don't need to care about - // anymore. These are particularly useful for the expiration mailer and CRL - // updater. See https://github.com/letsencrypt/boulder/issues/1864. - NotAfter time.Time `db:"notAfter"` - IsExpired bool `db:"isExpired"` - - // Note: this is not an issuance.IssuerNameID because that would create an - // import cycle between core and issuance. - // Note2: This field used to be called `issuerID`. We keep the old name in - // the DB, but update the Go field name to be clear which type of ID this - // is. - IssuerNameID int64 `db:"issuerID"` -} - -// FQDNSet contains the SHA256 hash of the lowercased, comma joined dNSNames -// contained in a certificate. 
-type FQDNSet struct { - ID int64 - SetHash []byte - Serial string - Issued time.Time - Expires time.Time -} - -// SCTDERs is a convenience type -type SCTDERs [][]byte - -// CertDER is a convenience type that helps differentiate what the -// underlying byte slice contains -type CertDER []byte - -// SuggestedWindow is a type exposed inside the RenewalInfo resource. -type SuggestedWindow struct { - Start time.Time `json:"start"` - End time.Time `json:"end"` -} - -// RenewalInfo is a type which is exposed to clients which query the renewalInfo -// endpoint specified in draft-aaron-ari. -type RenewalInfo struct { - SuggestedWindow SuggestedWindow `json:"suggestedWindow"` -} - -// RenewalInfoSimple constructs a `RenewalInfo` object and suggested window -// using a very simple renewal calculation: calculate a point 2/3rds of the way -// through the validity period, then give a 2-day window around that. Both the -// `issued` and `expires` timestamps are expected to be UTC. -func RenewalInfoSimple(issued time.Time, expires time.Time) RenewalInfo { - validity := expires.Add(time.Second).Sub(issued) - renewalOffset := validity / time.Duration(3) - idealRenewal := expires.Add(-renewalOffset) - return RenewalInfo{ - SuggestedWindow: SuggestedWindow{ - Start: idealRenewal.Add(-24 * time.Hour), - End: idealRenewal.Add(24 * time.Hour), - }, - } -} - -// RenewalInfoImmediate constructs a `RenewalInfo` object with a suggested -// window in the past. Per the draft-ietf-acme-ari-01 spec, clients should -// attempt to renew immediately if the suggested window is in the past. The -// passed `now` is assumed to be a timestamp representing the current moment in -// time. -func RenewalInfoImmediate(now time.Time) RenewalInfo { - oneHourAgo := now.Add(-1 * time.Hour) - return RenewalInfo{ - SuggestedWindow: SuggestedWindow{ - Start: oneHourAgo, - End: oneHourAgo.Add(time.Minute * 30), - }, - } -} diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go deleted file mode 100644 index d7fe026689..0000000000 --- a/vendor/github.com/letsencrypt/boulder/core/util.go +++ /dev/null @@ -1,309 +0,0 @@ -package core - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "expvar" - "fmt" - "io" - "math/big" - mrand "math/rand" - "os" - "path" - "reflect" - "regexp" - "sort" - "strings" - "time" - "unicode" - - "gopkg.in/go-jose/go-jose.v2" -) - -const Unspecified = "Unspecified" - -// Package Variables Variables - -// BuildID is set by the compiler (using -ldflags "-X core.BuildID $(git rev-parse --short HEAD)") -// and is used by GetBuildID -var BuildID string - -// BuildHost is set by the compiler and is used by GetBuildHost -var BuildHost string - -// BuildTime is set by the compiler and is used by GetBuildTime -var BuildTime string - -func init() { - expvar.NewString("BuildID").Set(BuildID) - expvar.NewString("BuildTime").Set(BuildTime) -} - -// Random stuff - -type randSource interface { - Read(p []byte) (n int, err error) -} - -// RandReader is used so that it can be replaced in tests that require -// deterministic output -var RandReader randSource = rand.Reader - -// RandomString returns a randomly generated string of the requested length. 
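The renewal-window arithmetic in RenewalInfoSimple above is compact enough to restate standalone: validity is measured from issued to one second past expires (matching the original), the ideal renewal point sits two thirds of the way through that period, and the suggested window is 24 hours either side of it. A sketch; suggestedWindow is an illustrative name:

package main

import (
	"fmt"
	"time"
)

// suggestedWindow reproduces the deleted RenewalInfoSimple arithmetic:
// ideal renewal at 2/3 of the validity period, +/- 24h around it.
func suggestedWindow(issued, expires time.Time) (start, end time.Time) {
	validity := expires.Add(time.Second).Sub(issued)
	ideal := expires.Add(-validity / 3)
	return ideal.Add(-24 * time.Hour), ideal.Add(24 * time.Hour)
}

func main() {
	issued := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	expires := issued.Add(90 * 24 * time.Hour) // a 90-day certificate
	start, end := suggestedWindow(issued, expires)
	fmt.Println(start.Format(time.RFC3339), "->", end.Format(time.RFC3339))
}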
-func RandomString(byteLength int) string { - b := make([]byte, byteLength) - _, err := io.ReadFull(RandReader, b) - if err != nil { - panic(fmt.Sprintf("Error reading random bytes: %s", err)) - } - return base64.RawURLEncoding.EncodeToString(b) -} - -// NewToken produces a random string for Challenges, etc. -func NewToken() string { - return RandomString(32) -} - -var tokenFormat = regexp.MustCompile(`^[\w-]{43}$`) - -// LooksLikeAToken checks whether a string represents a 32-octet value in -// the URL-safe base64 alphabet. -func LooksLikeAToken(token string) bool { - return tokenFormat.MatchString(token) -} - -// Fingerprints - -// Fingerprint256 produces an unpadded, URL-safe Base64-encoded SHA256 digest -// of the data. -func Fingerprint256(data []byte) string { - d := sha256.New() - _, _ = d.Write(data) // Never returns an error - return base64.RawURLEncoding.EncodeToString(d.Sum(nil)) -} - -type Sha256Digest [sha256.Size]byte - -// KeyDigest produces a Base64-encoded SHA256 digest of a -// provided public key. -func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) { - switch t := key.(type) { - case *jose.JSONWebKey: - if t == nil { - return Sha256Digest{}, errors.New("cannot compute digest of nil key") - } - return KeyDigest(t.Key) - case jose.JSONWebKey: - return KeyDigest(t.Key) - default: - keyDER, err := x509.MarshalPKIXPublicKey(key) - if err != nil { - return Sha256Digest{}, err - } - return sha256.Sum256(keyDER), nil - } -} - -// KeyDigestB64 produces a padded, standard Base64-encoded SHA256 digest of a -// provided public key. -func KeyDigestB64(key crypto.PublicKey) (string, error) { - digest, err := KeyDigest(key) - if err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(digest[:]), nil -} - -// KeyDigestEquals determines whether two public keys have the same digest. -func KeyDigestEquals(j, k crypto.PublicKey) bool { - digestJ, errJ := KeyDigestB64(j) - digestK, errK := KeyDigestB64(k) - // Keys that don't have a valid digest (due to marshalling problems) - // are never equal. So, e.g. nil keys are not equal. - if errJ != nil || errK != nil { - return false - } - return digestJ == digestK -} - -// PublicKeysEqual determines whether two public keys are identical. -func PublicKeysEqual(a, b crypto.PublicKey) (bool, error) { - switch ak := a.(type) { - case *rsa.PublicKey: - return ak.Equal(b), nil - case *ecdsa.PublicKey: - return ak.Equal(b), nil - default: - return false, fmt.Errorf("unsupported public key type %T", ak) - } -} - -// SerialToString converts a certificate serial number (big.Int) to a String -// consistently. -func SerialToString(serial *big.Int) string { - return fmt.Sprintf("%036x", serial) -} - -// StringToSerial converts a string into a certificate serial number (big.Int) -// consistently. -func StringToSerial(serial string) (*big.Int, error) { - var serialNum big.Int - if !ValidSerial(serial) { - return &serialNum, fmt.Errorf("invalid serial number %q", serial) - } - _, err := fmt.Sscanf(serial, "%036x", &serialNum) - return &serialNum, err -} - -// ValidSerial tests whether the input string represents a syntactically -// valid serial number, i.e., that it is a valid hex string between 32 -// and 36 characters long. -func ValidSerial(serial string) bool { - // Originally, serial numbers were 32 hex characters long. We later increased - // them to 36, but we allow the shorter ones because they exist in some - // production databases. 
- if len(serial) != 32 && len(serial) != 36 { - return false - } - _, err := hex.DecodeString(serial) - return err == nil -} - -// GetBuildID identifies what build is running. -func GetBuildID() (retID string) { - retID = BuildID - if retID == "" { - retID = Unspecified - } - return -} - -// GetBuildTime identifies when this build was made -func GetBuildTime() (retID string) { - retID = BuildTime - if retID == "" { - retID = Unspecified - } - return -} - -// GetBuildHost identifies the building host -func GetBuildHost() (retID string) { - retID = BuildHost - if retID == "" { - retID = Unspecified - } - return -} - -// IsAnyNilOrZero returns whether any of the supplied values are nil, or (if not) -// if any of them is its type's zero-value. This is useful for validating that -// all required fields on a proto message are present. -func IsAnyNilOrZero(vals ...interface{}) bool { - for _, val := range vals { - switch v := val.(type) { - case nil: - return true - case []byte: - if len(v) == 0 { - return true - } - default: - if reflect.ValueOf(v).IsZero() { - return true - } - } - } - return false -} - -// UniqueLowerNames returns the set of all unique names in the input after all -// of them are lowercased. The returned names will be in their lowercased form -// and sorted alphabetically. -func UniqueLowerNames(names []string) (unique []string) { - nameMap := make(map[string]int, len(names)) - for _, name := range names { - nameMap[strings.ToLower(name)] = 1 - } - - unique = make([]string, 0, len(nameMap)) - for name := range nameMap { - unique = append(unique, name) - } - sort.Strings(unique) - return -} - -// HashNames returns a hash of the names requested. This is intended for use -// when interacting with the orderFqdnSets table and rate limiting. -func HashNames(names []string) []byte { - names = UniqueLowerNames(names) - hash := sha256.Sum256([]byte(strings.Join(names, ","))) - return hash[:] -} - -// LoadCert loads a PEM certificate specified by filename or returns an error -func LoadCert(filename string) (*x509.Certificate, error) { - certPEM, err := os.ReadFile(filename) - if err != nil { - return nil, err - } - block, _ := pem.Decode(certPEM) - if block == nil { - return nil, fmt.Errorf("no data in cert PEM file %q", filename) - } - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - return cert, nil -} - -// retryJitter is used to prevent bunched retried queries from falling into lockstep -const retryJitter = 0.2 - -// RetryBackoff calculates a backoff time based on number of retries, will always -// add jitter so requests that start in unison won't fall into lockstep. Because of -// this the returned duration can always be larger than the maximum by a factor of -// retryJitter. Adapted from -// https://github.com/grpc/grpc-go/blob/v1.11.3/backoff.go#L77-L96 -func RetryBackoff(retries int, base, max time.Duration, factor float64) time.Duration { - if retries == 0 { - return 0 - } - backoff, fMax := float64(base), float64(max) - for backoff < fMax && retries > 1 { - backoff *= factor - retries-- - } - if backoff > fMax { - backoff = fMax - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= (1 - retryJitter) + 2*retryJitter*mrand.Float64() - return time.Duration(backoff) -} - -// IsASCII determines if every character in a string is encoded in -// the ASCII character set. 
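To see the schedule `RetryBackoff` above produces, here is a self-contained sketch repeating the same formula (the logic is copied so the snippet runs on its own; the 1s base, 10s cap, and factor of 2 are hypothetical values chosen for the demo):

```go
package main

import (
	"fmt"
	mrand "math/rand"
	"time"
)

// Same shape as RetryBackoff above, repeated here so the sketch runs on
// its own: base * factor^(retries-1), capped at max, then jittered by
// +/- 20% so concurrent callers spread out.
func backoff(retries int, base, max time.Duration, factor float64) time.Duration {
	if retries == 0 {
		return 0
	}
	b, fMax := float64(base), float64(max)
	for b < fMax && retries > 1 {
		b *= factor
		retries--
	}
	if b > fMax {
		b = fMax
	}
	b *= 0.8 + 0.4*mrand.Float64() // (1 - retryJitter) + 2*retryJitter*rand
	return time.Duration(b)
}

func main() {
	// Prints roughly 0s, ~1s, ~2s, ~4s, ~8s, ~10s (capped), each +/- 20%.
	for i := 0; i <= 5; i++ {
		fmt.Printf("retry %d: %v\n", i, backoff(i, time.Second, 10*time.Second, 2))
	}
}
```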
-func IsASCII(str string) bool { - for _, r := range str { - if r > unicode.MaxASCII { - return false - } - } - return true -} - -func Command() string { - return path.Base(os.Args[0]) -} diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go deleted file mode 100644 index 198c09db4e..0000000000 --- a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go +++ /dev/null @@ -1,95 +0,0 @@ -package goodkey - -import ( - "crypto" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "errors" - "os" - - "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/strictyaml" -) - -// blockedKeys is a type for maintaining a map of SHA256 hashes -// of SubjectPublicKeyInfo's that should be considered blocked. -// blockedKeys are created by using loadBlockedKeysList. -type blockedKeys map[core.Sha256Digest]bool - -var ErrWrongDecodedSize = errors.New("not enough bytes decoded for sha256 hash") - -// blocked checks if the given public key is considered administratively -// blocked based on a SHA256 hash of the SubjectPublicKeyInfo. -// Important: blocked should not be called except on a blockedKeys instance -// returned from loadBlockedKeysList. -// function should not be used until after `loadBlockedKeysList` has returned. -func (b blockedKeys) blocked(key crypto.PublicKey) (bool, error) { - hash, err := core.KeyDigest(key) - if err != nil { - // the bool result should be ignored when err is != nil but to be on the - // paranoid side return true anyway so that a key we can't compute the - // digest for will always be blocked even if a caller foolishly discards the - // err result. - return true, err - } - return b[hash], nil -} - -// loadBlockedKeysList creates a blockedKeys object that can be used to check if -// a key is blocked. It creates a lookup map from a list of -// SHA256 hashes of SubjectPublicKeyInfo's in the input YAML file -// with the expected format: -// -// blocked: -// - cuwGhNNI6nfob5aqY90e7BleU6l7rfxku4X3UTJ3Z7M= -// -// - Qebc1V3SkX3izkYRGNJilm9Bcuvf0oox4U2Rn+b4JOE= -// -// If no hashes are found in the input YAML an error is returned. 
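Entries in the `blocked:` list above are the standard, padded base64 of the SHA256 digest of a key's PKIX SubjectPublicKeyInfo, i.e. the value `core.KeyDigest` computes. A minimal stdlib-only sketch deriving one such entry (the ECDSA key is freshly generated, purely for illustration):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"crypto/x509"
	"encoding/base64"
	"fmt"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// Marshal the public key to PKIX SubjectPublicKeyInfo DER, hash it,
	// and base64-encode the digest: the format of a blocked-list entry.
	der, err := x509.MarshalPKIXPublicKey(&priv.PublicKey)
	if err != nil {
		panic(err)
	}
	digest := sha256.Sum256(der)
	fmt.Printf("blocked:\n  - %s\n", base64.StdEncoding.EncodeToString(digest[:]))
}
```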
-func loadBlockedKeysList(filename string) (*blockedKeys, error) { - yamlBytes, err := os.ReadFile(filename) - if err != nil { - return nil, err - } - - var list struct { - BlockedHashes []string `yaml:"blocked"` - BlockedHashesHex []string `yaml:"blockedHashesHex"` - } - err = strictyaml.Unmarshal(yamlBytes, &list) - if err != nil { - return nil, err - } - - if len(list.BlockedHashes) == 0 && len(list.BlockedHashesHex) == 0 { - return nil, errors.New("no blocked hashes in YAML") - } - - blockedKeys := make(blockedKeys, len(list.BlockedHashes)+len(list.BlockedHashesHex)) - for _, b64Hash := range list.BlockedHashes { - decoded, err := base64.StdEncoding.DecodeString(b64Hash) - if err != nil { - return nil, err - } - if len(decoded) != sha256.Size { - return nil, ErrWrongDecodedSize - } - var sha256Digest core.Sha256Digest - copy(sha256Digest[:], decoded[0:sha256.Size]) - blockedKeys[sha256Digest] = true - } - for _, hexHash := range list.BlockedHashesHex { - decoded, err := hex.DecodeString(hexHash) - if err != nil { - return nil, err - } - if len(decoded) != sha256.Size { - return nil, ErrWrongDecodedSize - } - var sha256Digest core.Sha256Digest - copy(sha256Digest[:], decoded[0:sha256.Size]) - blockedKeys[sha256Digest] = true - } - return &blockedKeys, nil -} diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go deleted file mode 100644 index 087a018123..0000000000 --- a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go +++ /dev/null @@ -1,421 +0,0 @@ -package goodkey - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "errors" - "fmt" - "math/big" - "sync" - - "github.com/letsencrypt/boulder/core" - - "github.com/titanous/rocacheck" -) - -// To generate, run: primes 2 752 | tr '\n' , -var smallPrimeInts = []int64{ - 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, - 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, - 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, - 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, - 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, - 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, - 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, - 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, - 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, - 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, - 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, - 719, 727, 733, 739, 743, 751, -} - -// singleton defines the object of a Singleton pattern -var ( - smallPrimesSingleton sync.Once - smallPrimesProduct *big.Int -) - -type Config struct { - // WeakKeyFile is the path to a JSON file containing truncated modulus hashes - // of known weak RSA keys. If this config value is empty, then RSA modulus - // hash checking will be disabled. - WeakKeyFile string - // BlockedKeyFile is the path to a YAML file containing base64-encoded SHA256 - // hashes of PKIX Subject Public Keys that should be blocked. If this config - // value is empty, then blocked key checking will be disabled. - BlockedKeyFile string - // FermatRounds is an integer number of rounds of Fermat's factorization - // method that should be performed to attempt to detect keys whose modulus can - // be trivially factored because the two factors are very close to each other. - // If this config value is empty (0), no factorization will be attempted. 
- FermatRounds int -} - -// ErrBadKey represents an error with a key. It is distinct from the various -// ways in which an ACME request can have an erroneous key (BadPublicKeyError, -// BadCSRError) because this library is used to check both JWS signing keys and -// keys in CSRs. -var ErrBadKey = errors.New("") - -func badKey(msg string, args ...interface{}) error { - return fmt.Errorf("%w%s", ErrBadKey, fmt.Errorf(msg, args...)) -} - -// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey functionality to KeyPolicy, -// rather than storing a full sa.SQLStorageAuthority. This allows external -// users who don’t want to import all of boulder/sa, and makes testing -// significantly simpler. -// On success, the function returns a boolean which is true if the key is blocked. -type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error) - -// KeyPolicy determines which types of key may be used with various boulder -// operations. -type KeyPolicy struct { - AllowRSA bool // Whether RSA keys should be allowed. - AllowECDSANISTP256 bool // Whether ECDSA NISTP256 keys should be allowed. - AllowECDSANISTP384 bool // Whether ECDSA NISTP384 keys should be allowed. - weakRSAList *WeakRSAKeys - blockedList *blockedKeys - fermatRounds int - blockedCheck BlockedKeyCheckFunc -} - -// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384. -// weakKeyFile contains the path to a JSON file containing truncated modulus -// hashes of known weak RSA keys. If this argument is empty RSA modulus hash -// checking will be disabled. blockedKeyFile contains the path to a YAML file -// containing Base64 encoded SHA256 hashes of pkix subject public keys that -// should be blocked. If this argument is empty then no blocked key checking is -// performed. -func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) { - kp := KeyPolicy{ - AllowRSA: true, - AllowECDSANISTP256: true, - AllowECDSANISTP384: true, - blockedCheck: bkc, - } - if config.WeakKeyFile != "" { - keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile) - if err != nil { - return KeyPolicy{}, err - } - kp.weakRSAList = keyList - } - if config.BlockedKeyFile != "" { - blocked, err := loadBlockedKeysList(config.BlockedKeyFile) - if err != nil { - return KeyPolicy{}, err - } - kp.blockedList = blocked - } - if config.FermatRounds < 0 { - return KeyPolicy{}, fmt.Errorf("Fermat factorization rounds cannot be negative: %d", config.FermatRounds) - } - kp.fermatRounds = config.FermatRounds - return kp, nil -} - -// GoodKey returns true if the key is acceptable for both TLS use and account -// key use (our requirements are the same for either one), according to basic -// strength and algorithm checking. GoodKey only supports pointers: *rsa.PublicKey -// and *ecdsa.PublicKey. It will reject non-pointer types. -// TODO: Support JSONWebKeys once go-jose migration is done. -func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) error { - // Early rejection of unacceptable key types to guard subsequent checks. - switch t := key.(type) { - case *rsa.PublicKey, *ecdsa.PublicKey: - break - default: - return badKey("unsupported key type %T", t) - } - // If there is a blocked list configured then check if the public key is one - // that has been administratively blocked. 
- if policy.blockedList != nil { - if blocked, err := policy.blockedList.blocked(key); err != nil { - return fmt.Errorf("error checking blocklist for key: %v", key) - } else if blocked { - return badKey("public key is forbidden") - } - } - if policy.blockedCheck != nil { - digest, err := core.KeyDigest(key) - if err != nil { - return badKey("%w", err) - } - exists, err := policy.blockedCheck(ctx, digest[:]) - if err != nil { - return err - } else if exists { - return badKey("public key is forbidden") - } - } - switch t := key.(type) { - case *rsa.PublicKey: - return policy.goodKeyRSA(t) - case *ecdsa.PublicKey: - return policy.goodKeyECDSA(t) - default: - return badKey("unsupported key type %T", key) - } -} - -// GoodKeyECDSA determines if an ECDSA pubkey meets our requirements -func (policy *KeyPolicy) goodKeyECDSA(key *ecdsa.PublicKey) (err error) { - // Check the curve. - // - // The validity of the curve is an assumption for all following tests. - err = policy.goodCurve(key.Curve) - if err != nil { - return err - } - - // Key validation routine adapted from NIST SP800-56A § 5.6.2.3.2. - // - // - // Assuming a prime field since a) we are only allowing such curves and b) - // crypto/elliptic only supports prime curves. Where this assumption - // simplifies the code below, it is explicitly stated and explained. If ever - // adapting this code to support non-prime curves, refer to NIST SP800-56A § - // 5.6.2.3.2 and adapt this code appropriately. - params := key.Params() - - // SP800-56A § 5.6.2.3.2 Step 1. - // Partial check of the public key for an invalid range in the EC group: - // Verify that key is not the point at infinity O. - // This code assumes that the point at infinity is (0,0), which is the - // case for all supported curves. - if isPointAtInfinityNISTP(key.X, key.Y) { - return badKey("key x, y must not be the point at infinity") - } - - // SP800-56A § 5.6.2.3.2 Step 2. - // "Verify that x_Q and y_Q are integers in the interval [0,p-1] in the - // case that q is an odd prime p, or that x_Q and y_Q are bit strings - // of length m bits in the case that q = 2**m." - // - // Prove prime field: ASSUMED. - // Prove q != 2: ASSUMED. (Curve parameter. No supported curve has q == 2.) - // Prime field && q != 2 => q is an odd prime p - // Therefore "verify that x, y are in [0, p-1]" satisfies step 2. - // - // Therefore verify that both x and y of the public key point have the unique - // correct representation of an element in the underlying field by verifying - // that x and y are integers in [0, p-1]. - if key.X.Sign() < 0 || key.Y.Sign() < 0 { - return badKey("key x, y must not be negative") - } - - if key.X.Cmp(params.P) >= 0 || key.Y.Cmp(params.P) >= 0 { - return badKey("key x, y must not exceed P-1") - } - - // SP800-56A § 5.6.2.3.2 Step 3. - // "If q is an odd prime p, verify that (y_Q)**2 === (x_Q)***3 + a*x_Q + b (mod p). - // If q = 2**m, verify that (y_Q)**2 + (x_Q)*(y_Q) == (x_Q)**3 + a*(x_Q)*2 + b in - // the finite field of size 2**m. - // (Ensures that the public key is on the correct elliptic curve.)" - // - // q is an odd prime p: proven/assumed above. - // a = -3 for all supported curves. - // - // Therefore step 3 is satisfied simply by showing that - // y**2 === x**3 - 3*x + B (mod P). - // - // This proves that the public key is on the correct elliptic curve. - // But in practice, this test is provided by crypto/elliptic, so use that. 
- if !key.Curve.IsOnCurve(key.X, key.Y) { - return badKey("key point is not on the curve") - } - - // SP800-56A § 5.6.2.3.2 Step 4. - // "Verify that n*Q == Ø. - // (Ensures that the public key has the correct order. Along with check 1, - // ensures that the public key is in the correct range in the correct EC - // subgroup, that is, it is in the correct EC subgroup and is not the - // identity element.)" - // - // Ensure that public key has the correct order: - // verify that n*Q = Ø. - // - // n*Q = Ø iff n*Q is the point at infinity (see step 1). - ox, oy := key.Curve.ScalarMult(key.X, key.Y, params.N.Bytes()) - if !isPointAtInfinityNISTP(ox, oy) { - return badKey("public key does not have correct order") - } - - // End of SP800-56A § 5.6.2.3.2 Public Key Validation Routine. - // Key is valid. - return nil -} - -// Returns true iff the point (x,y) on NIST P-256, NIST P-384 or NIST P-521 is -// the point at infinity. These curves all have the same point at infinity -// (0,0). This function must ONLY be used on points on curves verified to have -// (0,0) as their point at infinity. -func isPointAtInfinityNISTP(x, y *big.Int) bool { - return x.Sign() == 0 && y.Sign() == 0 -} - -// GoodCurve determines if an elliptic curve meets our requirements. -func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) { - // Simply use a whitelist for now. - params := c.Params() - switch { - case policy.AllowECDSANISTP256 && params == elliptic.P256().Params(): - return nil - case policy.AllowECDSANISTP384 && params == elliptic.P384().Params(): - return nil - default: - return badKey("ECDSA curve %v not allowed", params.Name) - } -} - -// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple -// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes -// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which -// have a known method to easily compute their private key, such as Debian Weak -// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at -// common key sizes, so we restrict all issuance to those common key sizes. -var acceptableRSAKeySizes = map[int]bool{ - 2048: true, - 3072: true, - 4096: true, -} - -// GoodKeyRSA determines if a RSA pubkey meets our requirements -func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) { - if !policy.AllowRSA { - return badKey("RSA keys are not allowed") - } - if policy.weakRSAList != nil && policy.weakRSAList.Known(key) { - return badKey("key is on a known weak RSA key list") - } - - modulus := key.N - - // See comment on acceptableRSAKeySizes above. - modulusBitLen := modulus.BitLen() - if !acceptableRSAKeySizes[modulusBitLen] { - return badKey("key size not supported: %d", modulusBitLen) - } - - // Rather than support arbitrary exponents, which significantly increases - // the size of the key space we allow, we restrict E to the defacto standard - // RSA exponent 65537. There is no specific standards document that specifies - // 65537 as the 'best' exponent, but ITU X.509 Annex C suggests there are - // notable merits for using it if using a fixed exponent. - // - // The CABF Baseline Requirements state: - // The CA SHALL confirm that the value of the public exponent is an - // odd number equal to 3 or more. Additionally, the public exponent - // SHOULD be in the range between 2^16 + 1 and 2^256-1. - // - // By only allowing one exponent, which fits these constraints, we satisfy - // these requirements. 
- if key.E != 65537 { - return badKey("key exponent must be 65537") - } - - // The modulus SHOULD also have the following characteristics: an odd - // number, not the power of a prime, and have no factors smaller than 752. - // TODO: We don't yet check for "power of a prime." - if checkSmallPrimes(modulus) { - return badKey("key divisible by small prime") - } - // Check for weak keys generated by Infineon hardware - // (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17) - if rocacheck.IsWeak(key) { - return badKey("key generated by vulnerable Infineon-based hardware") - } - // Check if the key can be easily factored via Fermat's factorization method. - if policy.fermatRounds > 0 { - err := checkPrimeFactorsTooClose(modulus, policy.fermatRounds) - if err != nil { - return badKey("key generated with factors too close together: %w", err) - } - } - - return nil -} - -// Returns true iff integer i is divisible by any of the primes in smallPrimes. -// -// Short circuits; execution time is dependent on i. Do not use this on secret -// values. -// -// Rather than checking each prime individually (invoking Mod on each), -// multiply the primes together and let GCD do our work for us: if the -// GCD between and is not one, we know we have -// a bad key. This is substantially faster than checking each prime -// individually. -func checkSmallPrimes(i *big.Int) bool { - smallPrimesSingleton.Do(func() { - smallPrimesProduct = big.NewInt(1) - for _, prime := range smallPrimeInts { - smallPrimesProduct.Mul(smallPrimesProduct, big.NewInt(prime)) - } - }) - - // When the GCD is 1, i and smallPrimesProduct are coprime, meaning they - // share no common factors. When the GCD is not one, it is the product of - // all common factors, meaning we've identified at least one small prime - // which invalidates i as a valid key. - - var result big.Int - result.GCD(nil, nil, i, smallPrimesProduct) - return result.Cmp(big.NewInt(1)) != 0 -} - -// Returns an error if the modulus n is able to be factored into primes p and q -// via Fermat's factorization method. This method relies on the two primes being -// very close together, which means that they were almost certainly not picked -// independently from a uniform random distribution. Basically, if we can factor -// the key this easily, so can anyone else. -func checkPrimeFactorsTooClose(n *big.Int, rounds int) error { - // Pre-allocate some big numbers that we'll use a lot down below. - one := big.NewInt(1) - bb := new(big.Int) - - // Any odd integer is equal to a difference of squares of integers: - // n = a^2 - b^2 = (a + b)(a - b) - // Any RSA public key modulus is equal to a product of two primes: - // n = pq - // Here we try to find values for a and b, since doing so also gives us the - // prime factors p = (a + b) and q = (a - b). - - // We start with a close to the square root of the modulus n, to start with - // two candidate prime factors that are as close together as possible and - // work our way out from there. Specifically, we set a = ceil(sqrt(n)), the - // first integer greater than the square root of n. Unfortunately, big.Int's - // built-in square root function takes the floor, so we have to add one to get - // the ceil. - a := new(big.Int) - a.Sqrt(n).Add(a, one) - - // We calculate b2 to see if it is a perfect square (i.e. b^2), and therefore - // b is an integer. Specifically, b2 = a^2 - n. 
- b2 := new(big.Int) - b2.Mul(a, a).Sub(b2, n) - - for i := 0; i < rounds; i++ { - // To see if b2 is a perfect square, we take its square root, square that, - // and check to see if we got the same result back. - bb.Sqrt(b2).Mul(bb, bb) - if b2.Cmp(bb) == 0 { - // b2 is a perfect square, so we've found integer values of a and b, - // and can easily compute p and q as their sum and difference. - bb.Sqrt(bb) - p := new(big.Int).Add(a, bb) - q := new(big.Int).Sub(a, bb) - return fmt.Errorf("public modulus n = pq factored into p: %s; q: %s", p, q) - } - - // Set up the next iteration by incrementing a by one and recalculating b2. - a.Add(a, one) - b2.Mul(a, a).Sub(b2, n) - } - return nil -} diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/weak.go b/vendor/github.com/letsencrypt/boulder/goodkey/weak.go deleted file mode 100644 index dd7afd5e4c..0000000000 --- a/vendor/github.com/letsencrypt/boulder/goodkey/weak.go +++ /dev/null @@ -1,66 +0,0 @@ -package goodkey - -// This file defines a basic method for testing if a given RSA public key is on one of -// the Debian weak key lists and is therefore considered compromised. Instead of -// directly loading the hash suffixes from the individual lists we flatten them all -// into a single JSON list using cmd/weak-key-flatten for ease of use. - -import ( - "crypto/rsa" - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "os" -) - -type truncatedHash [10]byte - -type WeakRSAKeys struct { - suffixes map[truncatedHash]struct{} -} - -func LoadWeakRSASuffixes(path string) (*WeakRSAKeys, error) { - f, err := os.ReadFile(path) - if err != nil { - return nil, err - } - - var suffixList []string - err = json.Unmarshal(f, &suffixList) - if err != nil { - return nil, err - } - - wk := &WeakRSAKeys{suffixes: make(map[truncatedHash]struct{})} - for _, suffix := range suffixList { - err := wk.addSuffix(suffix) - if err != nil { - return nil, err - } - } - return wk, nil -} - -func (wk *WeakRSAKeys) addSuffix(str string) error { - var suffix truncatedHash - decoded, err := hex.DecodeString(str) - if err != nil { - return err - } - if len(decoded) != 10 { - return fmt.Errorf("unexpected suffix length of %d", len(decoded)) - } - copy(suffix[:], decoded) - wk.suffixes[suffix] = struct{}{} - return nil -} - -func (wk *WeakRSAKeys) Known(key *rsa.PublicKey) bool { - // Hash input is in the format "Modulus={upper-case hex of modulus}\n" - hash := sha1.Sum([]byte(fmt.Sprintf("Modulus=%X\n", key.N.Bytes()))) - var suffix truncatedHash - copy(suffix[:], hash[10:]) - _, present := wk.suffixes[suffix] - return present -} diff --git a/vendor/github.com/letsencrypt/boulder/identifier/identifier.go b/vendor/github.com/letsencrypt/boulder/identifier/identifier.go deleted file mode 100644 index cbf228f869..0000000000 --- a/vendor/github.com/letsencrypt/boulder/identifier/identifier.go +++ /dev/null @@ -1,32 +0,0 @@ -// The identifier package defines types for RFC 8555 ACME identifiers. -package identifier - -// IdentifierType is a named string type for registered ACME identifier types. -// See https://tools.ietf.org/html/rfc8555#section-9.7.7 -type IdentifierType string - -const ( - // DNS is specified in RFC 8555 for DNS type identifiers. - DNS = IdentifierType("dns") -) - -// ACMEIdentifier is a struct encoding an identifier that can be validated. 
The -// protocol allows for different types of identifier to be supported (DNS -// names, IP addresses, etc.), but currently we only support RFC 8555 DNS type -// identifiers for domain names. -type ACMEIdentifier struct { - // Type is the registered IdentifierType of the identifier. - Type IdentifierType `json:"type"` - // Value is the value of the identifier. For a DNS type identifier it is - // a domain name. - Value string `json:"value"` -} - -// DNSIdentifier is a convenience function for creating an ACMEIdentifier with -// Type DNS for a given domain name. -func DNSIdentifier(domain string) ACMEIdentifier { - return ACMEIdentifier{ - Type: DNS, - Value: domain, - } -} diff --git a/vendor/github.com/letsencrypt/boulder/probs/probs.go b/vendor/github.com/letsencrypt/boulder/probs/probs.go deleted file mode 100644 index 2cc766237d..0000000000 --- a/vendor/github.com/letsencrypt/boulder/probs/probs.go +++ /dev/null @@ -1,341 +0,0 @@ -package probs - -import ( - "fmt" - "net/http" - - "github.com/letsencrypt/boulder/identifier" -) - -const ( - // Error types that can be used in ACME payloads. These are sorted in the - // same order as they are defined in RFC8555 Section 6.7. We do not implement - // the `compound`, `externalAccountRequired`, or `userActionRequired` errors, - // because we have no path that would return them. - AccountDoesNotExistProblem = ProblemType("accountDoesNotExist") - AlreadyRevokedProblem = ProblemType("alreadyRevoked") - BadCSRProblem = ProblemType("badCSR") - BadNonceProblem = ProblemType("badNonce") - BadPublicKeyProblem = ProblemType("badPublicKey") - BadRevocationReasonProblem = ProblemType("badRevocationReason") - BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm") - CAAProblem = ProblemType("caa") - ConnectionProblem = ProblemType("connection") - DNSProblem = ProblemType("dns") - InvalidContactProblem = ProblemType("invalidContact") - MalformedProblem = ProblemType("malformed") - OrderNotReadyProblem = ProblemType("orderNotReady") - RateLimitedProblem = ProblemType("rateLimited") - RejectedIdentifierProblem = ProblemType("rejectedIdentifier") - ServerInternalProblem = ProblemType("serverInternal") - TLSProblem = ProblemType("tls") - UnauthorizedProblem = ProblemType("unauthorized") - UnsupportedContactProblem = ProblemType("unsupportedContact") - UnsupportedIdentifierProblem = ProblemType("unsupportedIdentifier") - - ErrorNS = "urn:ietf:params:acme:error:" -) - -// ProblemType defines the error types in the ACME protocol -type ProblemType string - -// ProblemDetails objects represent problem documents -// https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00 -type ProblemDetails struct { - Type ProblemType `json:"type,omitempty"` - Detail string `json:"detail,omitempty"` - // HTTPStatus is the HTTP status code the ProblemDetails should probably be sent - // as. - HTTPStatus int `json:"status,omitempty"` - // SubProblems are optional additional per-identifier problems. See - // RFC 8555 Section 6.7.1: https://tools.ietf.org/html/rfc8555#section-6.7.1 - SubProblems []SubProblemDetails `json:"subproblems,omitempty"` -} - -// SubProblemDetails represents sub-problems specific to an identifier that are -// related to a top-level ProblemDetails. 
-// See RFC 8555 Section 6.7.1: https://tools.ietf.org/html/rfc8555#section-6.7.1 -type SubProblemDetails struct { - ProblemDetails - Identifier identifier.ACMEIdentifier `json:"identifier"` -} - -func (pd *ProblemDetails) Error() string { - return fmt.Sprintf("%s :: %s", pd.Type, pd.Detail) -} - -// WithSubProblems returns a new ProblemsDetails instance created by adding the -// provided subProbs to the existing ProblemsDetail. -func (pd *ProblemDetails) WithSubProblems(subProbs []SubProblemDetails) *ProblemDetails { - return &ProblemDetails{ - Type: pd.Type, - Detail: pd.Detail, - HTTPStatus: pd.HTTPStatus, - SubProblems: append(pd.SubProblems, subProbs...), - } -} - -// Helper functions which construct the basic RFC8555 Problem Documents, with -// the Type already set and the Details supplied by the caller. - -// AccountDoesNotExist returns a ProblemDetails representing an -// AccountDoesNotExistProblem error -func AccountDoesNotExist(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: AccountDoesNotExistProblem, - Detail: detail, - HTTPStatus: http.StatusBadRequest, - } -} - -// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad -// Request status code. -func AlreadyRevoked(detail string, a ...any) *ProblemDetails { - return &ProblemDetails{ - Type: AlreadyRevokedProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, - } -} - -// BadCSR returns a ProblemDetails representing a BadCSRProblem. -func BadCSR(detail string, a ...any) *ProblemDetails { - return &ProblemDetails{ - Type: BadCSRProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, - } -} - -// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad -// Request status code. -func BadNonce(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: BadNonceProblem, - Detail: detail, - HTTPStatus: http.StatusBadRequest, - } -} - -// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad -// Request status code. -func BadPublicKey(detail string, a ...any) *ProblemDetails { - return &ProblemDetails{ - Type: BadPublicKeyProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, - } -} - -// BadRevocationReason returns a ProblemDetails representing -// a BadRevocationReasonProblem -func BadRevocationReason(detail string, a ...any) *ProblemDetails { - return &ProblemDetails{ - Type: BadRevocationReasonProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, - } -} - -// BadSignatureAlgorithm returns a ProblemDetails with a BadSignatureAlgorithmProblem -// and a 400 Bad Request status code. 
-func BadSignatureAlgorithm(detail string, a ...any) *ProblemDetails { - return &ProblemDetails{ - Type: BadSignatureAlgorithmProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, - } -} - -// CAA returns a ProblemDetails representing a CAAProblem -func CAA(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: CAAProblem, - Detail: detail, - HTTPStatus: http.StatusForbidden, - } -} - -// Connection returns a ProblemDetails representing a ConnectionProblem -// error -func Connection(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: ConnectionProblem, - Detail: detail, - HTTPStatus: http.StatusBadRequest, - } -} - -// DNS returns a ProblemDetails representing a DNSProblem -func DNS(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: DNSProblem, - Detail: detail, - HTTPStatus: http.StatusBadRequest, - } -} - -// InvalidContact returns a ProblemDetails representing an InvalidContactProblem. -func InvalidContact(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: InvalidContactProblem, - Detail: detail, - HTTPStatus: http.StatusBadRequest, - } -} - -// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad -// Request status code. -func Malformed(detail string, a ...any) *ProblemDetails { - if len(a) > 0 { - detail = fmt.Sprintf(detail, a...) - } - return &ProblemDetails{ - Type: MalformedProblem, - Detail: detail, - HTTPStatus: http.StatusBadRequest, - } -} - -// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem -func OrderNotReady(detail string, a ...any) *ProblemDetails { - return &ProblemDetails{ - Type: OrderNotReadyProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusForbidden, - } -} - -// RateLimited returns a ProblemDetails representing a RateLimitedProblem error -func RateLimited(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: RateLimitedProblem, - Detail: detail, - HTTPStatus: http.StatusTooManyRequests, - } -} - -// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad -// Request status code. -func RejectedIdentifier(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: RejectedIdentifierProblem, - Detail: detail, - HTTPStatus: http.StatusBadRequest, - } -} - -// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a -// 500 Internal Server Failure status code. -func ServerInternal(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: ServerInternalProblem, - Detail: detail, - HTTPStatus: http.StatusInternalServerError, - } -} - -// TLS returns a ProblemDetails representing a TLSProblem error -func TLS(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: TLSProblem, - Detail: detail, - HTTPStatus: http.StatusBadRequest, - } -} - -// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403 -// Forbidden status code. 
-func Unauthorized(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: UnauthorizedProblem, - Detail: detail, - HTTPStatus: http.StatusForbidden, - } -} - -// UnsupportedContact returns a ProblemDetails representing an -// UnsupportedContactProblem -func UnsupportedContact(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: UnsupportedContactProblem, - Detail: detail, - HTTPStatus: http.StatusBadRequest, - } -} - -// UnsupportedIdentifier returns a ProblemDetails representing an -// UnsupportedIdentifierProblem -func UnsupportedIdentifier(detail string, a ...any) *ProblemDetails { - return &ProblemDetails{ - Type: UnsupportedIdentifierProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, - } -} - -// Additional helper functions that return variations on MalformedProblem with -// different HTTP status codes set. - -// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request -// Timeout status code. -func Canceled(detail string, a ...any) *ProblemDetails { - if len(a) > 0 { - detail = fmt.Sprintf(detail, a...) - } - return &ProblemDetails{ - Type: MalformedProblem, - Detail: detail, - HTTPStatus: http.StatusRequestTimeout, - } -} - -// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict -// status code. -func Conflict(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: MalformedProblem, - Detail: detail, - HTTPStatus: http.StatusConflict, - } -} - -// ContentLengthRequired returns a ProblemDetails representing a missing -// Content-Length header error -func ContentLengthRequired() *ProblemDetails { - return &ProblemDetails{ - Type: MalformedProblem, - Detail: "missing Content-Length header", - HTTPStatus: http.StatusLengthRequired, - } -} - -// InvalidContentType returns a ProblemDetails suitable for a missing -// ContentType header, or an incorrect ContentType header -func InvalidContentType(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: MalformedProblem, - Detail: detail, - HTTPStatus: http.StatusUnsupportedMediaType, - } -} - -// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP -// method error. -func MethodNotAllowed() *ProblemDetails { - return &ProblemDetails{ - Type: MalformedProblem, - Detail: "Method not allowed", - HTTPStatus: http.StatusMethodNotAllowed, - } -} - -// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found -// status code. 
-func NotFound(detail string) *ProblemDetails { - return &ProblemDetails{ - Type: MalformedProblem, - Detail: detail, - HTTPStatus: http.StatusNotFound, - } -} diff --git a/vendor/github.com/letsencrypt/boulder/revocation/reasons.go b/vendor/github.com/letsencrypt/boulder/revocation/reasons.go deleted file mode 100644 index 50f556be01..0000000000 --- a/vendor/github.com/letsencrypt/boulder/revocation/reasons.go +++ /dev/null @@ -1,72 +0,0 @@ -package revocation - -import ( - "fmt" - "sort" - "strings" - - "golang.org/x/crypto/ocsp" -) - -// Reason is used to specify a certificate revocation reason -type Reason int - -// ReasonToString provides a map from reason code to string -var ReasonToString = map[Reason]string{ - ocsp.Unspecified: "unspecified", - ocsp.KeyCompromise: "keyCompromise", - ocsp.CACompromise: "cACompromise", - ocsp.AffiliationChanged: "affiliationChanged", - ocsp.Superseded: "superseded", - ocsp.CessationOfOperation: "cessationOfOperation", - ocsp.CertificateHold: "certificateHold", - // 7 is unused - ocsp.RemoveFromCRL: "removeFromCRL", - ocsp.PrivilegeWithdrawn: "privilegeWithdrawn", - ocsp.AACompromise: "aAcompromise", -} - -// UserAllowedReasons contains the subset of Reasons which users are -// allowed to use -var UserAllowedReasons = map[Reason]struct{}{ - ocsp.Unspecified: {}, - ocsp.KeyCompromise: {}, - ocsp.Superseded: {}, - ocsp.CessationOfOperation: {}, -} - -// AdminAllowedReasons contains the subset of Reasons which admins are allowed -// to use. Reasons not found here will soon be forbidden from appearing in CRLs -// or OCSP responses by root programs. -var AdminAllowedReasons = map[Reason]struct{}{ - ocsp.Unspecified: {}, - ocsp.KeyCompromise: {}, - ocsp.Superseded: {}, - ocsp.CessationOfOperation: {}, - ocsp.PrivilegeWithdrawn: {}, -} - -// UserAllowedReasonsMessage contains a string describing a list of user allowed -// revocation reasons. This is useful when a revocation is rejected because it -// is not a valid user supplied reason and the allowed values must be -// communicated. This variable is populated during package initialization. -var UserAllowedReasonsMessage = "" - -func init() { - // Build a slice of ints from the allowed reason codes. - // We want a slice because iterating `UserAllowedReasons` will change order - // and make the message unpredictable and cumbersome for unit testing. - // We use []ints instead of []Reason to use `sort.Ints` without fuss. - var allowed []int - for reason := range UserAllowedReasons { - allowed = append(allowed, int(reason)) - } - sort.Ints(allowed) - - var reasonStrings []string - for _, reason := range allowed { - reasonStrings = append(reasonStrings, fmt.Sprintf("%s (%d)", - ReasonToString[Reason(reason)], reason)) - } - UserAllowedReasonsMessage = strings.Join(reasonStrings, ", ") -} diff --git a/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go b/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go deleted file mode 100644 index 8e3bae9965..0000000000 --- a/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go +++ /dev/null @@ -1,46 +0,0 @@ -// Package strictyaml provides a strict YAML unmarshaller based on `go-yaml/yaml` -package strictyaml - -import ( - "bytes" - "errors" - "fmt" - "io" - - "gopkg.in/yaml.v3" -) - -// Unmarshal takes a byte array and an interface passed by reference. The -// d.Decode will read the next YAML-encoded value from its input and store it in -// the value pointed to by yamlObj. 
Any config keys from the incoming YAML -// document which do not correspond to expected keys in the config struct will -// result in errors. -// -// TODO(https://github.com/go-yaml/yaml/issues/639): Replace this function with -// yaml.Unmarshal once a more ergonomic way to set unmarshal options is added -// upstream. -func Unmarshal(b []byte, yamlObj interface{}) error { - r := bytes.NewReader(b) - - d := yaml.NewDecoder(r) - d.KnownFields(true) - - // d.Decode will mutate yamlObj - err := d.Decode(yamlObj) - - if err != nil { - // io.EOF is returned when the YAML document is empty. - if errors.Is(err, io.EOF) { - return fmt.Errorf("unmarshalling YAML, bytes cannot be nil: %w", err) - } - return fmt.Errorf("unmarshalling YAML: %w", err) - } - - // As bytes are read by the decoder, the length of the byte buffer should - // decrease. If it doesn't, there's a problem. - if r.Len() != 0 { - return fmt.Errorf("yaml object of size %d bytes had %d bytes of unexpected unconsumed trailers", r.Size(), r.Len()) - } - - return nil -} diff --git a/vendor/github.com/magiconair/properties/.gitignore b/vendor/github.com/magiconair/properties/.gitignore deleted file mode 100644 index e7081ff522..0000000000 --- a/vendor/github.com/magiconair/properties/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -*.sublime-project -*.sublime-workspace -*.un~ -*.swp -.idea/ -*.iml diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md deleted file mode 100644 index 842e8e24fb..0000000000 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ /dev/null @@ -1,205 +0,0 @@ -## Changelog - -### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022 - - * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge - - Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch. - - * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions - -### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022 - - * [PR #57](https://github.com/magiconair/properties/pull/57):Fix "unreachable code" lint error - - Thanks to [@ellie](https://github.com/ellie) for the patch. - - * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible - - This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the - author happy until it affects real users. - - Thanks to [@maage](https://github.com/maage) for the patch. - -### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021 - - * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments - - When reading comments \ are loaded correctly, but when writing they are then - replaced by \\. This leads to wrong comments when writing and reading multiple times. - - Thanks to [@doxsch](https://github.com/doxsch) for the patch. - -### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020 - - * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references - - Thanks to [@sriv](https://github.com/sriv) for the patch. 
- -### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020 - - * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference - - The change is include the key in the error message which is causing the circular - reference when parsing/loading the properties files. - - Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch. - -### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 - - * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write - - This patch ensures that backslashes are escaped on write. Existing applications which - rely on the old behavior may need to be updated. - - Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. - - * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() - - Thanks to [@aliras1](https://github.com/aliras1) for the patch. - - * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - - * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - -### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 - - * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request - - This patch ensures that in `LoadURL` the response body is always closed. - - Thanks to [@liubog2008](https://github.com/liubog2008) for the patch. - -### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 - - * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading - - This adds the option to disable property expansion during loading. - - Thanks to [@kmala](https://github.com/kmala) for the patch. - -### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018 - - * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases. - - See PR for an example. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018 - - * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value - - Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail - with a `circular reference error`. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 - - * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces - - * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled - - Thanks to [@mgurov](https://github.com/mgurov) for the fix. 
- -### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 - - * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically - * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map - -### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017 - - * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency - * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc) - -### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017 - - * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER` - * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs - * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy - * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function - -### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016 - - * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL. - * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string. - * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe) - -### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015 - - * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags. - -### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015 - - * Vendored in gopkg.in/check.v1 - -### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015 - - * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs) - -### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015 - - * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references. 
- -### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015 - - * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) - -### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015 - - * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty - * Add clickable links to README - -### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014 - - * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with - [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration). - -### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014 - - * Added support for single and multi-line comments (reading, writing and updating) - * The order of keys is now preserved - * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry - * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method - * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1) - -### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014 - - * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one - -### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014 - - * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string - -### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014 - - * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys - * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties - -### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014 - -* Added support for time.Duration -* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom) -* Changed default of MustXXX() failure from panic to log.Fatal - -### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014 - -* Added MustGet...
functions -* Added support for int and uint with range checks on 32 bit platforms - -### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014 - -* Renamed from goproperties to properties -* Added support for expansion of environment vars in - filenames and value expressions -* Fixed bug where value expressions were not at the - start of the string - -### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014 - -* Initial release diff --git a/vendor/github.com/magiconair/properties/LICENSE.md b/vendor/github.com/magiconair/properties/LICENSE.md deleted file mode 100644 index 79c87e3e6f..0000000000 --- a/vendor/github.com/magiconair/properties/LICENSE.md +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013-2020, Frank Schroeder - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md deleted file mode 100644 index e2edda025b..0000000000 --- a/vendor/github.com/magiconair/properties/README.md +++ /dev/null @@ -1,128 +0,0 @@ -[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) -[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) -[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) -[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) - -# Overview - -#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why. - -properties is a Go library for reading and writing properties files. - -It supports reading from multiple files or URLs and Spring style recursive -property expansion of expressions like `${key}` to their corresponding value. -Value expressions can refer to other keys like in `${key}` or to environment -variables like in `${USER}`. 
Filenames can also contain environment variables -like in `/home/${USER}/myapp.properties`. - -Properties can be decoded into structs, maps, arrays and values through -struct tags. - -Comments and the order of keys are preserved. Comments can be modified -and can be written to the output. - -The properties library supports both ISO-8859-1 and UTF-8 encoded data. - -Starting from version 1.3.0 the behavior of the MustXXX() functions is -configurable by providing a custom `ErrorHandler` function. The default has -changed from `panic` to `log.Fatal` but this is configurable and custom -error handling functions can be provided. See the package documentation for -details. - -Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) - -## Getting Started - -```go -import ( - "flag" - "log" - "time" - - "github.com/magiconair/properties" -) - -func main() { - // init from a file - p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8) - - // or multiple files - p = properties.MustLoadFiles([]string{ - "${HOME}/config.properties", - "${HOME}/config-${USER}.properties", - }, properties.UTF8, true) - - // or from a map - p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"}) - - // or from a string - p = properties.MustLoadString("key=value\nabc=def") - - // or from a URL - p = properties.MustLoadURL("http://host/path") - - // or from multiple URLs - p = properties.MustLoadURLs([]string{ - "http://host/config", - "http://host/config-${USER}", - }, true) - - // or from flags - p.MustFlag(flag.CommandLine) - - // get values through getters - host := p.MustGetString("host") - port := p.GetInt("port", 8080) - - // or through Decode - type Config struct { - Host string `properties:"host"` - Port int `properties:"port,default=9000"` - Accept []string `properties:"accept,default=image/png;image;gif"` - Timeout time.Duration `properties:"timeout,default=5s"` - } - var cfg Config - if err := p.Decode(&cfg); err != nil { - log.Fatal(err) - } -} - -``` - -## Installation and Upgrade - -``` -$ go get -u github.com/magiconair/properties -``` - -## License - -2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details. - -## ToDo - -* Dump contents with passwords and secrets obscured - -## Updated Git tags - -#### 13 Feb 2018 - -I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags -and I've only recently learned that this doesn't play well with `git describe` 😞 - -I have replaced all lightweight tags with signed tags using this script which should -retain the commit date, name and email address. Please run `git pull --tags` to update them. - -Worst case you have to reclone the repo. - -```shell -#!/bin/bash -tag=$1 -echo "Updating $tag" -date=$(git show ${tag}^0 --format=%aD | head -1) -email=$(git show ${tag}^0 --format=%aE | head -1) -name=$(git show ${tag}^0 --format=%aN | head -1) -GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag} -``` - -I apologize for the inconvenience.
- -Frank - diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go deleted file mode 100644 index 8e6aa441d9..0000000000 --- a/vendor/github.com/magiconair/properties/decode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" -) - -// Decode assigns property values to exported fields of a struct. - -// - -// Decode traverses v recursively and returns an error if a value cannot be -// converted to the field type or a required value is missing for a field. -// -// The following type dependent decodings are used: -// -// String, boolean, numeric fields have the value of the property key assigned. -// The property key name is the name of the field. A different key and a default -// value can be set in the field's tag. Fields without default value are -// required. If the value cannot be converted to the field type an error is -// returned. -// -// time.Duration fields have the result of time.ParseDuration() assigned. -// -// time.Time fields have the value of time.Parse() assigned. The default layout -// is time.RFC3339 but can be set in the field's tag. -// -// Arrays and slices of string, boolean, numeric, time.Duration and time.Time -// fields have the value interpreted as a comma separated list of values. The -// individual values are trimmed of whitespace and empty values are ignored. A -// default value can be provided as a semicolon separated list in the field's -// tag. -// -// Struct fields are decoded recursively using the field name plus "." as -// prefix. The prefix (without dot) can be overridden in the field's tag. -// Default values are not supported in the field's tag. Specify them on the -// fields of the inner struct instead. -// -// Map fields must have a key of type string and are decoded recursively by -// using the field's name plus "." as prefix and the next element of the key -// name as map key. The prefix (without dot) can be overridden in the field's -// tag. Default values are not supported. -// -// Examples: -// -// // Field is ignored. -// Field int `properties:"-"` -// -// // Field is assigned value of 'Field'. -// Field int -// -// // Field is assigned value of 'myName'. -// Field int `properties:"myName"` -// -// // Field is assigned value of key 'myName' and has a default -// // value 15 if the key does not exist. -// Field int `properties:"myName,default=15"` -// -// // Field is assigned value of key 'Field' and has a default -// // value 15 if the key does not exist. -// Field int `properties:",default=15"` -// -// // Field is assigned value of key 'date' and the date -// // is in format 2006-01-02 -// Field time.Time `properties:"date,layout=2006-01-02"` -// -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas. -// Field []string -// -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas and has a default -// // value ["a", "b", "c"] if the key does not exist. -// Field []string `properties:",default=a;b;c"` -// -// // Field is decoded recursively with "Field." as key prefix. -// Field SomeStruct -// -// // Field is decoded recursively with "myName." as key prefix.
-// Field SomeStruct `properties:"myName"` -// -// // Field is decoded recursively with "Field." as key prefix -// // and the next dotted element of the key as map key. -// Field map[string]string -// -// // Field is decoded recursively with "myName." as key prefix -// // and the next dotted element of the key as map key. -// Field map[string]string `properties:"myName"` -func (p *Properties) Decode(x interface{}) error { - t, v := reflect.TypeOf(x), reflect.ValueOf(x) - if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { - return fmt.Errorf("not a pointer to struct: %s", t) - } - if err := dec(p, "", nil, nil, v); err != nil { - return err - } - return nil -} - -func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error { - t := v.Type() - - // value returns the property value for key or the default if provided. - value := func() (string, error) { - if val, ok := p.Get(key); ok { - return val, nil - } - if def != nil { - return *def, nil - } - return "", fmt.Errorf("missing required key %s", key) - } - - // conv converts a string to a value of the given type. - conv := func(s string, t reflect.Type) (val reflect.Value, err error) { - var v interface{} - - switch { - case isDuration(t): - v, err = time.ParseDuration(s) - - case isTime(t): - layout := opts["layout"] - if layout == "" { - layout = time.RFC3339 - } - v, err = time.Parse(layout, s) - - case isBool(t): - v, err = boolVal(s), nil - - case isString(t): - v, err = s, nil - - case isFloat(t): - v, err = strconv.ParseFloat(s, 64) - - case isInt(t): - v, err = strconv.ParseInt(s, 10, 64) - - case isUint(t): - v, err = strconv.ParseUint(s, 10, 64) - - default: - return reflect.Zero(t), fmt.Errorf("unsupported type %s", t) - } - if err != nil { - return reflect.Zero(t), err - } - return reflect.ValueOf(v).Convert(t), nil - } - - // keydef returns the property key and the default value based on the - // name of the struct field and the options in the tag. - keydef := func(f reflect.StructField) (string, *string, map[string]string) { - _key, _opts := parseTag(f.Tag.Get("properties")) - - var _def *string - if d, ok := _opts["default"]; ok { - _def = &d - } - if _key != "" { - return _key, _def, _opts - } - return f.Name, _def, _opts - } - - switch { - case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t): - s, err := value() - if err != nil { - return err - } - val, err := conv(s, t) - if err != nil { - return err - } - v.Set(val) - - case isPtr(t): - return dec(p, key, def, opts, v.Elem()) - - case isStruct(t): - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - fk, def, opts := keydef(t.Field(i)) - if !fv.CanSet() { - return fmt.Errorf("cannot set %s", t.Field(i).Name) - } - if fk == "-" { - continue - } - if key != "" { - fk = key + "." 
+ fk - } - if err := dec(p, fk, def, opts, fv); err != nil { - return err - } - } - return nil - - case isArray(t): - val, err := value() - if err != nil { - return err - } - vals := split(val, ";") - a := reflect.MakeSlice(t, 0, len(vals)) - for _, s := range vals { - val, err := conv(s, t.Elem()) - if err != nil { - return err - } - a = reflect.Append(a, val) - } - v.Set(a) - - case isMap(t): - valT := t.Elem() - m := reflect.MakeMap(t) - for postfix := range p.FilterStripPrefix(key + ".").m { - pp := strings.SplitN(postfix, ".", 2) - mk, mv := pp[0], reflect.New(valT) - if err := dec(p, key+"."+mk, nil, nil, mv); err != nil { - return err - } - m.SetMapIndex(reflect.ValueOf(mk), mv.Elem()) - } - v.Set(m) - - default: - return fmt.Errorf("unsupported type %s", t) - } - return nil -} - -// split splits a string on sep, trims whitespace of elements -// and omits empty elements -func split(s string, sep string) []string { - var a []string - for _, v := range strings.Split(s, sep) { - if v = strings.TrimSpace(v); v != "" { - a = append(a, v) - } - } - return a -} - -// parseTag parses a "key,k=v,k=v,..." -func parseTag(tag string) (key string, opts map[string]string) { - opts = map[string]string{} - for i, s := range strings.Split(tag, ",") { - if i == 0 { - key = s - continue - } - - pp := strings.SplitN(s, "=", 2) - if len(pp) == 1 { - opts[pp[0]] = "" - } else { - opts[pp[0]] = pp[1] - } - } - return key, opts -} - -func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice } -func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool } -func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) } -func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map } -func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr } -func isString(t reflect.Type) bool { return t.Kind() == reflect.String } -func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } -func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } -func isFloat(t reflect.Type) bool { - return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 -} -func isInt(t reflect.Type) bool { - return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 -} -func isUint(t reflect.Type) bool { - return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 -} diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go deleted file mode 100644 index 7c79793159..0000000000 --- a/vendor/github.com/magiconair/properties/doc.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package properties provides functions for reading and writing -// ISO-8859-1 and UTF-8 encoded .properties files and has -// support for recursive property expansion. -// -// Java properties files are ISO-8859-1 encoded and use Unicode -// literals for characters outside the ISO character set. Unicode -// literals can be used in UTF-8 encoded properties files but -// aren't necessary. 
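For reference, here is a minimal sketch of the encoding behavior the doc.go comment above describes. This snippet is not part of the diff or the upstream sources; it only exercises the `Load` function and the `UTF8`/`ISO_8859_1` constants shown in load.go further below, and the key and value are invented:

```go
package main

import (
	"fmt"
	"log"

	"github.com/magiconair/properties"
)

func main() {
	// ISO-8859-1 input: byte 0xE9 is 'é' in Latin-1 and is mapped
	// straight to the corresponding rune when the buffer is decoded.
	latin1 := []byte("name = caf\xe9")
	p, err := properties.Load(latin1, properties.ISO_8859_1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.GetString("name", "")) // café

	// UTF-8 input can spell the same value with a \uXXXX unicode
	// literal, which the lexer decodes while scanning the value.
	p, err = properties.Load([]byte(`name = caf\u00e9`), properties.UTF8)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.GetString("name", "")) // café
}
```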
-// -// To load a single properties file use MustLoadFile(): -// -// p := properties.MustLoadFile(filename, properties.UTF8) -// -// To load multiple properties files use MustLoadFiles() -// which loads the files in the given order and merges the -// result. Missing properties files can be ignored if the -// 'ignoreMissing' flag is set to true. -// -// Filenames can contain environment variables which are expanded -// before loading. -// -// f1 := "/etc/myapp/myapp.conf" -// f2 := "/home/${USER}/myapp.conf" -// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) -// -// All of the different key/value delimiters ' ', ':' and '=' are -// supported as well as the comment characters '!' and '#' and -// multi-line values. -// -// ! this is a comment -// # and so is this -// -// # the following expressions are equal -// key value -// key=value -// key:value -// key = value -// key : value -// key = val\ -// ue -// -// Properties stores all comments preceding a key and provides -// GetComments() and SetComments() methods to retrieve and -// update them. The convenience functions GetComment() and -// SetComment() allow access to the last comment. The -// WriteComment() method writes properties files including -// the comments and with the keys in the original order. -// This can be used for sanitizing properties files. -// -// Property expansion is recursive and circular references -// and malformed expressions are not allowed and cause an -// error. Expansion of environment variables is supported. -// -// # standard property -// key = value -// -// # property expansion: key2 = value -// key2 = ${key} -// -// # recursive expansion: key3 = value -// key3 = ${key2} -// -// # circular reference (error) -// key = ${key} -// -// # malformed expression (error) -// key = ${ke -// -// # refers to the users' home dir -// home = ${HOME} -// -// # local key takes precedence over env var: u = foo -// USER = foo -// u = ${USER} -// -// The default property expansion format is ${key} but can be -// changed by setting different pre- and postfix values on the -// Properties object. -// -// p := properties.NewProperties() -// p.Prefix = "#[" -// p.Postfix = "]#" -// -// Properties provides convenience functions for getting typed -// values with default values if the key does not exist or the -// type conversion failed. -// -// # Returns true if the value is either "1", "on", "yes" or "true" -// # Returns false for every other value and the default value if -// # the key does not exist. -// v = p.GetBool("key", false) -// -// # Returns the value if the key exists and the format conversion -// # was successful. Otherwise, the default value is returned. -// v = p.GetInt64("key", 999) -// v = p.GetUint64("key", 999) -// v = p.GetFloat64("key", 123.0) -// v = p.GetString("key", "def") -// v = p.GetDuration("key", 999) -// -// As an alternative properties may be applied with the standard -// library's flag implementation at any time. -// -// # Standard configuration -// v = flag.Int("key", 999, "help message") -// flag.Parse() -// -// # Merge p into the flag set -// p.MustFlag(flag.CommandLine) -// -// Properties provides several MustXXX() convenience functions -// which will terminate the app if an error occurs. The behavior -// of the failure is configurable and the default is to call -// log.Fatal(err). To have the MustXXX() functions panic instead -// of logging the error set a different ErrorHandler before -// you use the Properties package. 
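As a companion to the expansion rules spelled out in the comment above, a small self-contained sketch (the keys are invented, not taken from the upstream sources) of recursive `${key}` expansion and the fallback to environment variables:

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	// "greeting" expands the local key ${name}; "shell" falls back to
	// the SHELL environment variable because no local key exists.
	p := properties.MustLoadString("name = world\ngreeting = hello ${name}\nshell = ${SHELL}")

	fmt.Println(p.MustGetString("greeting")) // hello world
	fmt.Println(p.GetString("shell", ""))    // e.g. /bin/bash; empty if SHELL is unset
}
```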
- -// -// properties.ErrorHandler = properties.PanicHandler -// -// # Will panic instead of logging an error -// p := properties.MustLoadFile("config.properties") -// -// You can also provide your own ErrorHandler function. The only requirement -// is that the error handler function must exit after handling the error. -// -// properties.ErrorHandler = func(err error) { -// fmt.Println(err) -// os.Exit(1) -// } -// -// # Will write to stdout and then exit -// p := properties.MustLoadFile("config.properties") -// -// Properties can also be loaded into a struct via the `Decode` -// method, e.g. -// -// type S struct { -// A string `properties:"a,default=foo"` -// D time.Duration `properties:"timeout,default=5s"` -// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` -// } -// -// See `Decode()` method for the full documentation. -// -// The following documents provide a description of the properties -// file format. -// -// http://en.wikipedia.org/wiki/.properties -// -// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 -package properties diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go deleted file mode 100644 index 35d0ae97b3..0000000000 --- a/vendor/github.com/magiconair/properties/integrate.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import "flag" - -// MustFlag sets flags that are skipped by dst.Parse when p contains -// the respective key for flag.Flag.Name. -// -// Its use is recommended with command line arguments as in: -// -// flag.Parse() -// p.MustFlag(flag.CommandLine) -func (p *Properties) MustFlag(dst *flag.FlagSet) { - m := make(map[string]*flag.Flag) - dst.VisitAll(func(f *flag.Flag) { - m[f.Name] = f - }) - dst.Visit(func(f *flag.Flag) { - delete(m, f.Name) // overridden - }) - - for name, f := range m { - v, ok := p.Get(name) - if !ok { - continue - } - - if err := f.Value.Set(v); err != nil { - ErrorHandler(err) - } - } -} diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go deleted file mode 100644 index 3d15a1f6ed..0000000000 --- a/vendor/github.com/magiconair/properties/lex.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Parts of the lexer are from the template/text/parser package -// For these parts the following applies: -// -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file of the go 1.2 -// distribution. - -package properties - -import ( - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. -type item struct { - typ itemType // The type of this item. - pos int // The starting position, in bytes, of this item in the input string. - val string // The value of this item.
-} - -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// itemType identifies the type of lex items. -type itemType int - -const ( - itemError itemType = iota // error occurred; value is text of error - itemEOF - itemKey // a key - itemValue // a value - itemComment // a comment -) - -// defines a constant for EOF -const eof = -1 - -// permitted whitespace characters space, FF and TAB -const whitespace = " \f\t" - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// lexer holds the state of the scanner. -type lexer struct { - input string // the string being scanned - state stateFn // the next lexing function to enter - pos int // current position in the input - start int // start position of this item - width int // width of last rune read from input - lastPos int // position of most recent item returned by nextItem - runes []rune // scanned runes for this item - items chan item // channel of scanned items -} - -// next returns the next rune in the input. -func (l *lexer) next() rune { - if l.pos >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = w - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - i := item{t, l.start, string(l.runes)} - l.items <- i - l.start = l.pos - l.runes = l.runes[:0] -} - -// ignore skips over the pending input before this point. -func (l *lexer) ignore() { - l.start = l.pos -} - -// appends the rune to the current value -func (l *lexer) appendRune(r rune) { - l.runes = append(l.runes, r) -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.ContainsRune(valid, l.next()) { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.ContainsRune(valid, l.next()) { - } - l.backup() -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - i := <-l.items - l.lastPos = i.pos - return i -} - -// lex creates a new scanner for the input string. -func lex(input string) *lexer { - l := &lexer{ - input: input, - items: make(chan item), - runes: make([]rune, 0, 32), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. 
-func (l *lexer) run() { - for l.state = lexBeforeKey(l); l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -// lexBeforeKey scans until a key begins. -func lexBeforeKey(l *lexer) stateFn { - switch r := l.next(); { - case isEOF(r): - l.emit(itemEOF) - return nil - - case isEOL(r): - l.ignore() - return lexBeforeKey - - case isComment(r): - return lexComment - - case isWhitespace(r): - l.ignore() - return lexBeforeKey - - default: - l.backup() - return lexKey - } -} - -// lexComment scans a comment line. The comment character has already been scanned. -func lexComment(l *lexer) stateFn { - l.acceptRun(whitespace) - l.ignore() - for { - switch r := l.next(); { - case isEOF(r): - l.ignore() - l.emit(itemEOF) - return nil - case isEOL(r): - l.emit(itemComment) - return lexBeforeKey - default: - l.appendRune(r) - } - } -} - -// lexKey scans the key up to a delimiter -func lexKey(l *lexer) stateFn { - var r rune - -Loop: - for { - switch r = l.next(); { - - case isEscape(r): - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - - case isEndOfKey(r): - l.backup() - break Loop - - case isEOF(r): - break Loop - - default: - l.appendRune(r) - } - } - - if len(l.runes) > 0 { - l.emit(itemKey) - } - - if isEOF(r) { - l.emit(itemEOF) - return nil - } - - return lexBeforeValue -} - -// lexBeforeValue scans the delimiter between key and value. -// Leading and trailing whitespace is ignored. -// We expect to be just after the key. -func lexBeforeValue(l *lexer) stateFn { - l.acceptRun(whitespace) - l.accept(":=") - l.acceptRun(whitespace) - l.ignore() - return lexValue -} - -// lexValue scans text until the end of the line. We expect to be just after the delimiter. -func lexValue(l *lexer) stateFn { - for { - switch r := l.next(); { - case isEscape(r): - if isEOL(l.peek()) { - l.next() - l.acceptRun(whitespace) - } else { - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - } - - case isEOL(r): - l.emit(itemValue) - l.ignore() - return lexBeforeKey - - case isEOF(r): - l.emit(itemValue) - l.emit(itemEOF) - return nil - - default: - l.appendRune(r) - } - } -} - -// scanEscapeSequence scans either one of the escaped characters -// or a unicode literal. We expect to be after the escape character. -func (l *lexer) scanEscapeSequence() error { - switch r := l.next(); { - - case isEscapedCharacter(r): - l.appendRune(decodeEscapedCharacter(r)) - return nil - - case atUnicodeLiteral(r): - return l.scanUnicodeLiteral() - - case isEOF(r): - return fmt.Errorf("premature EOF") - - // silently drop the escape character and append the rune as is - default: - l.appendRune(r) - return nil - } -} - -// scans a unicode literal in the form \uXXXX. We expect to be after the \u. -func (l *lexer) scanUnicodeLiteral() error { - // scan the digits - d := make([]rune, 4) - for i := 0; i < 4; i++ { - d[i] = l.next() - if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { - return fmt.Errorf("invalid unicode literal") - } - } - - // decode the digits into a rune - r, err := strconv.ParseInt(string(d), 16, 0) - if err != nil { - return err - } - - l.appendRune(rune(r)) - return nil -} - -// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. 
-func decodeEscapedCharacter(r rune) rune { - switch r { - case 'f': - return '\f' - case 'n': - return '\n' - case 'r': - return '\r' - case 't': - return '\t' - default: - return r - } -} - -// atUnicodeLiteral reports whether we are at a unicode literal. -// The escape character has already been consumed. -func atUnicodeLiteral(r rune) bool { - return r == 'u' -} - -// isComment reports whether we are at the start of a comment. -func isComment(r rune) bool { - return r == '#' || r == '!' -} - -// isEndOfKey reports whether the rune terminates the current key. -func isEndOfKey(r rune) bool { - return strings.ContainsRune(" \f\t\r\n:=", r) -} - -// isEOF reports whether we are at EOF. -func isEOF(r rune) bool { - return r == eof -} - -// isEOL reports whether we are at a new line character. -func isEOL(r rune) bool { - return r == '\n' || r == '\r' -} - -// isEscape reports whether the rune is the escape character which -// prefixes unicode literals and other escaped characters. -func isEscape(r rune) bool { - return r == '\\' -} - -// isEscapedCharacter reports whether we are at one of the characters that need escaping. -// The escape character has already been consumed. -func isEscapedCharacter(r rune) bool { - return strings.ContainsRune(" :=fnrt", r) -} - -// isWhitespace reports whether the rune is a whitespace character. -func isWhitespace(r rune) bool { - return strings.ContainsRune(whitespace, r) -} diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go deleted file mode 100644 index 635368dc8a..0000000000 --- a/vendor/github.com/magiconair/properties/load.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" -) - -// Encoding specifies encoding of the input data. -type Encoding uint - -const ( - // utf8Default is a private placeholder for the zero value of Encoding to - // ensure that it has the correct meaning. UTF8 is the default encoding but - // was assigned a non-zero value which cannot be changed without breaking - // existing code. Clients should continue to use the public constants. - utf8Default Encoding = iota - - // UTF8 interprets the input data as UTF-8. - UTF8 - - // ISO_8859_1 interprets the input data as ISO-8859-1. - ISO_8859_1 -) - -type Loader struct { - // Encoding determines how the data from files and byte buffers - // is interpreted. For URLs the Content-Type header is used - // to determine the encoding of the data. - Encoding Encoding - - // DisableExpansion configures the property expansion of the - // returned property object. When set to true, the property values - // will not be expanded and the Property object will not be checked - // for invalid expansion expressions. - DisableExpansion bool - - // IgnoreMissing configures whether missing files or URLs which return - // 404 are reported as errors. When set to true, missing files and 404 - // status codes are not reported as errors. - IgnoreMissing bool -} - -// LoadBytes reads a buffer into a Properties struct. -func (l *Loader) LoadBytes(buf []byte) (*Properties, error) { - return l.loadBytes(buf, l.Encoding) -} - -// LoadAll reads the content of multiple URLs or files in the given order into -// a Properties struct.
If IgnoreMissing is true then a 404 status code or -// missing file will not be reported as error. Encoding sets the encoding for -// files. For the URLs see LoadURL for the Content-Type header and the -// encoding. -func (l *Loader) LoadAll(names []string) (*Properties, error) { - all := NewProperties() - for _, name := range names { - n, err := expandName(name) - if err != nil { - return nil, err - } - - var p *Properties - switch { - case strings.HasPrefix(n, "http://"): - p, err = l.LoadURL(n) - case strings.HasPrefix(n, "https://"): - p, err = l.LoadURL(n) - default: - p, err = l.LoadFile(n) - } - if err != nil { - return nil, err - } - all.Merge(p) - } - - all.DisableExpansion = l.DisableExpansion - if all.DisableExpansion { - return all, nil - } - return all, all.check() -} - -// LoadFile reads a file into a Properties struct. -// If IgnoreMissing is true then a missing file will not be -// reported as error. -func (l *Loader) LoadFile(filename string) (*Properties, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if l.IgnoreMissing && os.IsNotExist(err) { - LogPrintf("properties: %s not found. skipping", filename) - return NewProperties(), nil - } - return nil, err - } - return l.loadBytes(data, l.Encoding) -} - -// LoadURL reads the content of the URL into a Properties struct. -// -// The encoding is determined via the Content-Type header which -// should be set to 'text/plain'. If the 'charset' parameter is -// missing, 'iso-8859-1' or 'latin1' the encoding is set to -// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the -// encoding is set to UTF-8. A missing content type header is -// interpreted as 'text/plain; charset=utf-8'. -func (l *Loader) LoadURL(url string) (*Properties, error) { - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode == 404 && l.IgnoreMissing { - LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) - return NewProperties(), nil - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) - } - - ct := resp.Header.Get("Content-Type") - ct = strings.Join(strings.Fields(ct), "") - var enc Encoding - switch strings.ToLower(ct) { - case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1": - enc = ISO_8859_1 - case "", "text/plain;charset=utf-8": - enc = UTF8 - default: - return nil, fmt.Errorf("properties: invalid content type %s", ct) - } - - return l.loadBytes(body, enc) -} - -func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) { - p, err := parse(convert(buf, enc)) - if err != nil { - return nil, err - } - p.DisableExpansion = l.DisableExpansion - if p.DisableExpansion { - return p, nil - } - return p, p.check() -} - -// Load reads a buffer into a Properties struct. -func Load(buf []byte, enc Encoding) (*Properties, error) { - l := &Loader{Encoding: enc} - return l.LoadBytes(buf) -} - -// LoadString reads an UTF8 string into a properties struct. -func LoadString(s string) (*Properties, error) { - l := &Loader{Encoding: UTF8} - return l.LoadBytes([]byte(s)) -} - -// LoadMap creates a new Properties struct from a string map. 
-func LoadMap(m map[string]string) *Properties { - p := NewProperties() - for k, v := range m { - p.Set(k, v) - } - return p -} - -// LoadFile reads a file into a Properties struct. -func LoadFile(filename string, enc Encoding) (*Properties, error) { - l := &Loader{Encoding: enc} - return l.LoadAll([]string{filename}) -} - -// LoadFiles reads multiple files in the given order into -// a Properties struct. If 'ignoreMissing' is true then -// non-existent files will not be reported as error. -func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} - return l.LoadAll(filenames) -} - -// LoadURL reads the content of the URL into a Properties struct. -// See Loader#LoadURL for details. -func LoadURL(url string) (*Properties, error) { - l := &Loader{Encoding: UTF8} - return l.LoadAll([]string{url}) -} - -// LoadURLs reads the content of multiple URLs in the given order into a -// Properties struct. If IgnoreMissing is true then a 404 status code will -// not be reported as error. See Loader#LoadURL for the Content-Type header -// and the encoding. -func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing} - return l.LoadAll(urls) -} - -// LoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. -func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} - return l.LoadAll(names) -} - -// MustLoadString reads an UTF8 string into a Properties struct and -// panics on error. -func MustLoadString(s string) *Properties { - return must(LoadString(s)) -} - -// MustLoadFile reads a file into a Properties struct and -// panics on error. -func MustLoadFile(filename string, enc Encoding) *Properties { - return must(LoadFile(filename, enc)) -} - -// MustLoadFiles reads multiple files in the given order into -// a Properties struct and panics on error. If 'ignoreMissing' -// is true then non-existent files will not be reported as error. -func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadFiles(filenames, enc, ignoreMissing)) -} - -// MustLoadURL reads the content of a URL into a Properties struct and -// panics on error. -func MustLoadURL(url string) *Properties { - return must(LoadURL(url)) -} - -// MustLoadURLs reads the content of multiple URLs in the given order into a -// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 -// status code will not be reported as error. -func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { - return must(LoadURLs(urls, ignoreMissing)) -} - -// MustLoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. It panics on error. 
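The package-level helpers above are thin wrappers around the `Loader` type. A hedged sketch of using `Loader` directly (the file paths are placeholders, not from the sources):

```go
package main

import (
	"fmt"
	"log"

	"github.com/magiconair/properties"
)

func main() {
	// IgnoreMissing makes missing files and 404 responses non-fatal;
	// on merge, later sources override values from earlier ones.
	l := &properties.Loader{Encoding: properties.UTF8, IgnoreMissing: true}

	p, err := l.LoadAll([]string{
		"/etc/myapp/myapp.conf",    // placeholder path; skipped if missing
		"/home/${USER}/myapp.conf", // ${USER} is expanded before loading
		// names starting with http:// or https:// would be fetched via LoadURL
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.Len(), "keys loaded")
}
```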
-func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadAll(names, enc, ignoreMissing)) -} - -func must(p *Properties, err error) *Properties { - if err != nil { - ErrorHandler(err) - } - return p -} - -// expandName expands ${ENV_VAR} expressions in a name. -// If the environment variable does not exist then it will be replaced -// with an empty string. Malformed expressions like "${ENV_VAR" will -// be reported as error. -func expandName(name string) (string, error) { - return expand(name, []string{}, "${", "}", make(map[string]string)) -} - -// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. -// For ISO-8859-1 we can convert each byte straight into a rune since the -// first 256 unicode code points cover ISO-8859-1. -func convert(buf []byte, enc Encoding) string { - switch enc { - case utf8Default, UTF8: - return string(buf) - case ISO_8859_1: - runes := make([]rune, len(buf)) - for i, b := range buf { - runes[i] = rune(b) - } - return string(runes) - default: - ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) - } - panic("ErrorHandler should exit") -} diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go deleted file mode 100644 index fccfd39f6b..0000000000 --- a/vendor/github.com/magiconair/properties/parser.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "runtime" -) - -type parser struct { - lex *lexer -} - -func parse(input string) (properties *Properties, err error) { - p := &parser{lex: lex(input)} - defer p.recover(&err) - - properties = NewProperties() - key := "" - comments := []string{} - - for { - token := p.expectOneOf(itemComment, itemKey, itemEOF) - switch token.typ { - case itemEOF: - goto done - case itemComment: - comments = append(comments, token.val) - continue - case itemKey: - key = token.val - if _, ok := properties.m[key]; !ok { - properties.k = append(properties.k, key) - } - } - - token = p.expectOneOf(itemValue, itemEOF) - if len(comments) > 0 { - properties.c[key] = comments - comments = []string{} - } - switch token.typ { - case itemEOF: - properties.m[key] = "" - goto done - case itemValue: - properties.m[key] = token.val - } - } - -done: - return properties, nil -} - -func (p *parser) errorf(format string, args ...interface{}) { - format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -func (p *parser) expectOneOf(expected ...itemType) (token item) { - token = p.lex.nextItem() - for _, v := range expected { - if token.typ == v { - return token - } - } - p.unexpected(token) - panic("unexpected token") -} - -func (p *parser) unexpected(token item) { - p.errorf(token.String()) -} - -// recover is the handler that turns panics into returns from the top level of Parse. 
-func (p *parser) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - *errp = e.(error) - } -} diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go deleted file mode 100644 index fb2f7b4048..0000000000 --- a/vendor/github.com/magiconair/properties/properties.go +++ /dev/null @@ -1,848 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. -// BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used. - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -const maxExpansionDepth = 64 - -// ErrorHandlerFunc defines the type of function which handles failures -// of the MustXXX() functions. An error handler function must exit -// the application after handling the error. -type ErrorHandlerFunc func(error) - -// ErrorHandler is the function which handles failures of the MustXXX() -// functions. The default is LogFatalHandler. -var ErrorHandler ErrorHandlerFunc = LogFatalHandler - -// LogHandlerFunc defines the function prototype for logging errors. -type LogHandlerFunc func(fmt string, args ...interface{}) - -// LogPrintf defines a log handler which uses log.Printf. -var LogPrintf LogHandlerFunc = log.Printf - -// LogFatalHandler handles the error by logging a fatal error and exiting. -func LogFatalHandler(err error) { - log.Fatal(err) -} - -// PanicHandler handles the error by panicking. -func PanicHandler(err error) { - panic(err) -} - -// ----------------------------------------------------------------------------- - -// A Properties contains the key/value pairs from the properties input. -// All values are stored in unexpanded form and are expanded at runtime -type Properties struct { - // Pre-/Postfix for property expansion. - Prefix string - Postfix string - - // DisableExpansion controls the expansion of properties on Get() - // and the check for circular references on Set(). When set to - // true Properties behaves like a simple key/value store and does - // not check for circular references on Get() or on Set(). - DisableExpansion bool - - // Stores the key/value pairs - m map[string]string - - // Stores the comments per key. - c map[string][]string - - // Stores the keys in order of appearance. - k []string - - // WriteSeparator specifies the separator of key and value while writing the properties. - WriteSeparator string -} - -// NewProperties creates a new Properties struct with the default -// configuration for "${key}" expressions. -func NewProperties() *Properties { - return &Properties{ - Prefix: "${", - Postfix: "}", - m: map[string]string{}, - c: map[string][]string{}, - k: []string{}, - } -} - -// Load reads a buffer into the given Properties struct. -func (p *Properties) Load(buf []byte, enc Encoding) error { - l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion} - newProperties, err := l.LoadBytes(buf) - if err != nil { - return err - } - p.Merge(newProperties) - return nil -} - -// Get returns the expanded value for the given key if exists. -// Otherwise, ok is false. 
-func (p *Properties) Get(key string) (value string, ok bool) { - v, ok := p.m[key] - if p.DisableExpansion { - return v, ok - } - if !ok { - return "", false - } - - expanded, err := p.expand(key, v) - - // we guarantee that the expanded value is free of - // circular references and malformed expressions - // so we panic if we still get an error here. - if err != nil { - ErrorHandler(err) - } - - return expanded, true -} - -// MustGet returns the expanded value for the given key if exists. -// Otherwise, it panics. -func (p *Properties) MustGet(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// ClearComments removes the comments for all keys. -func (p *Properties) ClearComments() { - p.c = map[string][]string{} -} - -// ---------------------------------------------------------------------------- - -// GetComment returns the last comment before the given key or an empty string. -func (p *Properties) GetComment(key string) string { - comments, ok := p.c[key] - if !ok || len(comments) == 0 { - return "" - } - return comments[len(comments)-1] -} - -// ---------------------------------------------------------------------------- - -// GetComments returns all comments that appeared before the given key or nil. -func (p *Properties) GetComments(key string) []string { - if comments, ok := p.c[key]; ok { - return comments - } - return nil -} - -// ---------------------------------------------------------------------------- - -// SetComment sets the comment for the key. -func (p *Properties) SetComment(key, comment string) { - p.c[key] = []string{comment} -} - -// ---------------------------------------------------------------------------- - -// SetComments sets the comments for the key. If the comments are nil then -// all comments for this key are deleted. -func (p *Properties) SetComments(key string, comments []string) { - if comments == nil { - delete(p.c, key) - return - } - p.c[key] = comments -} - -// ---------------------------------------------------------------------------- - -// GetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the default value is returned. -func (p *Properties) GetBool(key string, def bool) bool { - v, err := p.getBool(key) - if err != nil { - return def - } - return v -} - -// MustGetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the function panics. -func (p *Properties) MustGetBool(key string) bool { - v, err := p.getBool(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getBool(key string) (value bool, err error) { - if v, ok := p.Get(key); ok { - return boolVal(v), nil - } - return false, invalidKeyError(key) -} - -func boolVal(v string) bool { - v = strings.ToLower(v) - return v == "1" || v == "true" || v == "yes" || v == "on" -} - -// ---------------------------------------------------------------------------- - -// GetDuration parses the expanded value as an time.Duration (in ns) if the -// key exists. If key does not exist or the value cannot be parsed the default -// value is returned. In almost all cases you want to use GetParsedDuration(). 
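The distinction the comment above draws between the two duration getters, in a small invented example (not part of the upstream sources):

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	// GetDuration treats the raw value as an integer nanosecond count,
	// while GetParsedDuration accepts the time.ParseDuration format.
	p := properties.MustLoadString("t1 = 5000000000\nt2 = 5s")

	fmt.Println(p.GetDuration("t1", 0))       // 5s
	fmt.Println(p.GetParsedDuration("t2", 0)) // 5s
}
```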
-func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { - v, err := p.getInt64(key) - if err != nil { - return def - } - return time.Duration(v) -} - -// MustGetDuration parses the expanded value as an time.Duration (in ns) if -// the key exists. If key does not exist or the value cannot be parsed the -// function panics. In almost all cases you want to use MustGetParsedDuration(). -func (p *Properties) MustGetDuration(key string) time.Duration { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return time.Duration(v) -} - -// ---------------------------------------------------------------------------- - -// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration { - s, ok := p.Get(key) - if !ok { - return def - } - v, err := time.ParseDuration(s) - if err != nil { - return def - } - return v -} - -// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetParsedDuration(key string) time.Duration { - s, ok := p.Get(key) - if !ok { - ErrorHandler(invalidKeyError(key)) - } - v, err := time.ParseDuration(s) - if err != nil { - ErrorHandler(err) - } - return v -} - -// ---------------------------------------------------------------------------- - -// GetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetFloat64(key string, def float64) float64 { - v, err := p.getFloat64(key) - if err != nil { - return def - } - return v -} - -// MustGetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetFloat64(key string) float64 { - v, err := p.getFloat64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getFloat64(key string) (value float64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseFloat(v, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into an int the -// function panics with an out of range error. -func (p *Properties) GetInt(key string, def int) int { - v, err := p.getInt64(key) - if err != nil { - return def - } - return intRangeCheck(key, v) -} - -// MustGetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into an int the function panics with -// an out of range error. -func (p *Properties) MustGetInt(key string) int { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return intRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetInt64 parses the expanded value as an int64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. 
-func (p *Properties) GetInt64(key string, def int64) int64 { - v, err := p.getInt64(key) - if err != nil { - return def - } - return v -} - -// MustGetInt64 parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetInt64(key string) int64 { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getInt64(key string) (value int64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseInt(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetUint parses the expanded value as an uint if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into an int the -// function panics with an out of range error. -func (p *Properties) GetUint(key string, def uint) uint { - v, err := p.getUint64(key) - if err != nil { - return def - } - return uintRangeCheck(key, v) -} - -// MustGetUint parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into an int the function panics with -// an out of range error. -func (p *Properties) MustGetUint(key string) uint { - v, err := p.getUint64(key) - if err != nil { - ErrorHandler(err) - } - return uintRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetUint64 parses the expanded value as an uint64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetUint64(key string, def uint64) uint64 { - v, err := p.getUint64(key) - if err != nil { - return def - } - return v -} - -// MustGetUint64 parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetUint64(key string) uint64 { - v, err := p.getUint64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getUint64(key string) (value uint64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseUint(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetString returns the expanded value for the given key if exists or -// the default value otherwise. -func (p *Properties) GetString(key, def string) string { - if v, ok := p.Get(key); ok { - return v - } - return def -} - -// MustGetString returns the expanded value for the given key if exists or -// panics otherwise. -func (p *Properties) MustGetString(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// Filter returns a new properties object which contains all properties -// for which the key matches the pattern. 
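To illustrate the Filter family defined below, a short sketch with made-up keys (the prefix and pattern are purely illustrative):

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.MustLoadString("db.host = a\ndb.port = 5432\nlog.level = info")

	// FilterPrefix keeps the full keys; FilterStripPrefix drops the prefix.
	db := p.FilterStripPrefix("db.")
	fmt.Println(db.Keys())            // [host port]
	fmt.Println(db.GetInt("port", 0)) // 5432

	// Filter compiles the pattern and delegates to FilterRegexp.
	if lg, err := p.Filter("^log\\."); err == nil {
		fmt.Println(lg.Keys()) // [log.level]
	}
}
```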
-func (p *Properties) Filter(pattern string) (*Properties, error) { - re, err := regexp.Compile(pattern) - if err != nil { - return nil, err - } - - return p.FilterRegexp(re), nil -} - -// FilterRegexp returns a new properties object which contains all properties -// for which the key matches the regular expression. -func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties { - pp := NewProperties() - for _, k := range p.k { - if re.MatchString(k) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) - pp.Set(k, p.m[k]) - } - } - return pp -} - -// FilterPrefix returns a new properties object with a subset of all keys -// with the given prefix. -func (p *Properties) FilterPrefix(prefix string) *Properties { - pp := NewProperties() - for _, k := range p.k { - if strings.HasPrefix(k, prefix) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) - pp.Set(k, p.m[k]) - } - } - return pp -} - -// FilterStripPrefix returns a new properties object with a subset of all keys -// with the given prefix and the prefix removed from the keys. -func (p *Properties) FilterStripPrefix(prefix string) *Properties { - pp := NewProperties() - n := len(prefix) - for _, k := range p.k { - if len(k) > len(prefix) && strings.HasPrefix(k, prefix) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference - // TODO(fs): this function should probably return an error but the signature is fixed - pp.Set(k[n:], p.m[k]) - } - } - return pp -} - -// Len returns the number of keys. -func (p *Properties) Len() int { - return len(p.m) -} - -// Keys returns all keys in the same order as in the input. -func (p *Properties) Keys() []string { - keys := make([]string, len(p.k)) - copy(keys, p.k) - return keys -} - -// Set sets the property key to the corresponding value. -// If a value for key existed before then ok is true and prev -// contains the previous value. If the value contains a -// circular reference or a malformed expression then -// an error is returned. -// An empty key is silently ignored. -func (p *Properties) Set(key, value string) (prev string, ok bool, err error) { - if key == "" { - return "", false, nil - } - - // if expansion is disabled we allow circular references - if p.DisableExpansion { - prev, ok = p.Get(key) - p.m[key] = value - if !ok { - p.k = append(p.k, key) - } - return prev, ok, nil - } - - // to check for a circular reference we temporarily need - // to set the new value. If there is an error then revert - // to the previous state. Only if all tests are successful - // then we add the key to the p.k list. - prev, ok = p.Get(key) - p.m[key] = value - - // now check for a circular reference - _, err = p.expand(key, value) - if err != nil { - - // revert to the previous state - if ok { - p.m[key] = prev - } else { - delete(p.m, key) - } - - return "", false, err - } - - if !ok { - p.k = append(p.k, key) - } - - return prev, ok, nil -} - -// SetValue sets property key to the default string value -// as defined by fmt.Sprintf("%v"). -func (p *Properties) SetValue(key string, value interface{}) error { - _, _, err := p.Set(key, fmt.Sprintf("%v", value)) - return err -} - -// MustSet sets the property key to the corresponding value. 
-// If a value for key existed before then ok is true and prev -// contains the previous value. An empty key is silently ignored. -func (p *Properties) MustSet(key, value string) (prev string, ok bool) { - prev, ok, err := p.Set(key, value) - if err != nil { - ErrorHandler(err) - } - return prev, ok -} - -// String returns a string of all expanded 'key = value' pairs. -func (p *Properties) String() string { - var s string - for _, key := range p.k { - value, _ := p.Get(key) - s = fmt.Sprintf("%s%s = %s\n", s, key, value) - } - return s -} - -// Sort sorts the properties keys in alphabetical order. -// This is helpfully before writing the properties. -func (p *Properties) Sort() { - sort.Strings(p.k) -} - -// Write writes all unexpanded 'key = value' pairs to the given writer. -// Write returns the number of bytes written and any write error encountered. -func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { - return p.WriteComment(w, "", enc) -} - -// WriteComment writes all unexpanced 'key = value' pairs to the given writer. -// If prefix is not empty then comments are written with a blank line and the -// given prefix. The prefix should be either "# " or "! " to be compatible with -// the properties file format. Otherwise, the properties parser will not be -// able to read the file back in. It returns the number of bytes written and -// any write error encountered. -func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) { - var x int - - for _, key := range p.k { - value := p.m[key] - - if prefix != "" { - if comments, ok := p.c[key]; ok { - // don't print comments if they are all empty - allEmpty := true - for _, c := range comments { - if c != "" { - allEmpty = false - break - } - } - - if !allEmpty { - // add a blank line between entries but not at the top - if len(comments) > 0 && n > 0 { - x, err = fmt.Fprintln(w) - if err != nil { - return - } - n += x - } - - for _, c := range comments { - x, err = fmt.Fprintf(w, "%s%s\n", prefix, c) - if err != nil { - return - } - n += x - } - } - } - } - sep := " = " - if p.WriteSeparator != "" { - sep = p.WriteSeparator - } - x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc)) - if err != nil { - return - } - n += x - } - return -} - -// Map returns a copy of the properties as a map. -func (p *Properties) Map() map[string]string { - m := make(map[string]string) - for k, v := range p.m { - m[k] = v - } - return m -} - -// FilterFunc returns a copy of the properties which includes the values which passed all filters. -func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties { - pp := NewProperties() -outer: - for k, v := range p.m { - for _, f := range filters { - if !f(k, v) { - continue outer - } - pp.Set(k, v) - } - } - return pp -} - -// ---------------------------------------------------------------------------- - -// Delete removes the key and its comments. 
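Because Write and WriteComment emit keys in insertion (p.k) order, Sort is typically called immediately before serializing. A short sketch; SetComment is assumed from the same upstream package, as it is not part of this hunk:

```go
package main

import (
	"log"
	"os"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()
	p.MustSet("zeta", "26")
	p.MustSet("alpha", "1")
	p.SetComment("alpha", "first key") // assumed upstream helper

	p.Sort() // alphabetical key order before writing

	// "# " (or "! ") keeps the comments parseable on the way back in.
	if _, err := p.WriteComment(os.Stdout, "# ", properties.UTF8); err != nil {
		log.Fatal(err)
	}
	// # first key
	// alpha = 1
	// zeta = 26
}
```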
-func (p *Properties) Delete(key string) { - delete(p.m, key) - delete(p.c, key) - newKeys := []string{} - for _, k := range p.k { - if k != key { - newKeys = append(newKeys, k) - } - } - p.k = newKeys -} - -// Merge merges properties, comments and keys from other *Properties into p -func (p *Properties) Merge(other *Properties) { - for _, k := range other.k { - if _, ok := p.m[k]; !ok { - p.k = append(p.k, k) - } - } - for k, v := range other.m { - p.m[k] = v - } - for k, v := range other.c { - p.c[k] = v - } -} - -// ---------------------------------------------------------------------------- - -// check expands all values and returns an error if a circular reference or -// a malformed expression was found. -func (p *Properties) check() error { - for key, value := range p.m { - if _, err := p.expand(key, value); err != nil { - return err - } - } - return nil -} - -func (p *Properties) expand(key, input string) (string, error) { - // no pre/postfix -> nothing to expand - if p.Prefix == "" && p.Postfix == "" { - return input, nil - } - - return expand(input, []string{key}, p.Prefix, p.Postfix, p.m) -} - -// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values. -// The function keeps track of the keys that were already expanded and stops if it -// detects a circular reference or a malformed expression of the form '(prefix)key'. -func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) { - if len(keys) > maxExpansionDepth { - return "", fmt.Errorf("expansion too deep") - } - - for { - start := strings.Index(s, prefix) - if start == -1 { - return s, nil - } - - keyStart := start + len(prefix) - keyLen := strings.Index(s[keyStart:], postfix) - if keyLen == -1 { - return "", fmt.Errorf("malformed expression") - } - - end := keyStart + keyLen + len(postfix) - 1 - key := s[keyStart : keyStart+keyLen] - - // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) - - for _, k := range keys { - if key == k { - var b bytes.Buffer - b.WriteString("circular reference in:\n") - for _, k1 := range keys { - fmt.Fprintf(&b, "%s=%s\n", k1, values[k1]) - } - return "", fmt.Errorf(b.String()) - } - } - - val, ok := values[key] - if !ok { - val = os.Getenv(key) - } - new_val, err := expand(val, append(keys, key), prefix, postfix, values) - if err != nil { - return "", err - } - s = s[:start] + new_val + s[end+1:] - } -} - -// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. -func encode(s string, special string, enc Encoding) string { - switch enc { - case UTF8: - return encodeUtf8(s, special) - case ISO_8859_1: - return encodeIso(s, special) - default: - panic(fmt.Sprintf("unsupported encoding %v", enc)) - } -} - -func encodeUtf8(s string, special string) string { - v := "" - for pos := 0; pos < len(s); { - r, w := utf8.DecodeRuneInString(s[pos:]) - pos += w - v += escape(r, special) - } - return v -} - -func encodeIso(s string, special string) string { - var r rune - var w int - var v string - for pos := 0; pos < len(s); { - switch r, w = utf8.DecodeRuneInString(s[pos:]); { - case r < 1<<8: // single byte rune -> escape special chars only - v += escape(r, special) - case r < 1<<16: // two byte rune -> unicode literal - v += fmt.Sprintf("\\u%04x", r) - default: // more than two bytes per rune -> can't encode - v += "?" 
- } - pos += w - } - return v -} - -func escape(r rune, special string) string { - switch r { - case '\f': - return "\\f" - case '\n': - return "\\n" - case '\r': - return "\\r" - case '\t': - return "\\t" - case '\\': - return "\\\\" - default: - if strings.ContainsRune(special, r) { - return "\\" + string(r) - } - return string(r) - } -} - -func invalidKeyError(key string) error { - return fmt.Errorf("unknown property: %s", key) -} diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go deleted file mode 100644 index dbd60b36e7..0000000000 --- a/vendor/github.com/magiconair/properties/rangecheck.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "math" -) - -// make this a var to overwrite it in a test -var is32Bit = ^uint(0) == math.MaxUint32 - -// intRangeCheck checks if the value fits into the int type and -// panics if it does not. -func intRangeCheck(key string, v int64) int { - if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return int(v) -} - -// uintRangeCheck checks if the value fits into the uint type and -// panics if it does not. -func uintRangeCheck(key string, v uint64) uint { - if is32Bit && v > math.MaxUint32 { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return uint(v) -} diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE deleted file mode 100644 index fbff658f70..0000000000 --- a/vendor/github.com/mailru/easyjson/LICENSE +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2016 Mail.Ru Group - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go deleted file mode 100644 index 598a54af9d..0000000000 --- a/vendor/github.com/mailru/easyjson/buffer/pool.go +++ /dev/null @@ -1,278 +0,0 @@ -// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to -// reduce copying and to allow reuse of individual chunks. -package buffer - -import ( - "io" - "net" - "sync" -) - -// PoolConfig contains configuration for the allocation and reuse strategy. 
-type PoolConfig struct { - StartSize int // Minimum chunk size that is allocated. - PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. - MaxSize int // Maximum chunk size that will be allocated. -} - -var config = PoolConfig{ - StartSize: 128, - PooledSize: 512, - MaxSize: 32768, -} - -// Reuse pool: chunk size -> pool. -var buffers = map[int]*sync.Pool{} - -func initBuffers() { - for l := config.PooledSize; l <= config.MaxSize; l *= 2 { - buffers[l] = new(sync.Pool) - } -} - -func init() { - initBuffers() -} - -// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. -func Init(cfg PoolConfig) { - config = cfg - initBuffers() -} - -// putBuf puts a chunk to reuse pool if it can be reused. -func putBuf(buf []byte) { - size := cap(buf) - if size < config.PooledSize { - return - } - if c := buffers[size]; c != nil { - c.Put(buf[:0]) - } -} - -// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. -func getBuf(size int) []byte { - if size >= config.PooledSize { - if c := buffers[size]; c != nil { - v := c.Get() - if v != nil { - return v.([]byte) - } - } - } - return make([]byte, 0, size) -} - -// Buffer is a buffer optimized for serialization without extra copying. -type Buffer struct { - - // Buf is the current chunk that can be used for serialization. - Buf []byte - - toPool []byte - bufs [][]byte -} - -// EnsureSpace makes sure that the current chunk contains at least s free bytes, -// possibly creating a new chunk. -func (b *Buffer) EnsureSpace(s int) { - if cap(b.Buf)-len(b.Buf) < s { - b.ensureSpaceSlow(s) - } -} - -func (b *Buffer) ensureSpaceSlow(s int) { - l := len(b.Buf) - if l > 0 { - if cap(b.toPool) != cap(b.Buf) { - // Chunk was reallocated, toPool can be pooled. - putBuf(b.toPool) - } - if cap(b.bufs) == 0 { - b.bufs = make([][]byte, 0, 8) - } - b.bufs = append(b.bufs, b.Buf) - l = cap(b.toPool) * 2 - } else { - l = config.StartSize - } - - if l > config.MaxSize { - l = config.MaxSize - } - b.Buf = getBuf(l) - b.toPool = b.Buf -} - -// AppendByte appends a single byte to buffer. -func (b *Buffer) AppendByte(data byte) { - b.EnsureSpace(1) - b.Buf = append(b.Buf, data) -} - -// AppendBytes appends a byte slice to buffer. -func (b *Buffer) AppendBytes(data []byte) { - if len(data) <= cap(b.Buf)-len(b.Buf) { - b.Buf = append(b.Buf, data...) // fast path - } else { - b.appendBytesSlow(data) - } -} - -func (b *Buffer) appendBytesSlow(data []byte) { - for len(data) > 0 { - b.EnsureSpace(1) - - sz := cap(b.Buf) - len(b.Buf) - if sz > len(data) { - sz = len(data) - } - - b.Buf = append(b.Buf, data[:sz]...) - data = data[sz:] - } -} - -// AppendString appends a string to buffer. -func (b *Buffer) AppendString(data string) { - if len(data) <= cap(b.Buf)-len(b.Buf) { - b.Buf = append(b.Buf, data...) // fast path - } else { - b.appendStringSlow(data) - } -} - -func (b *Buffer) appendStringSlow(data string) { - for len(data) > 0 { - b.EnsureSpace(1) - - sz := cap(b.Buf) - len(b.Buf) - if sz > len(data) { - sz = len(data) - } - - b.Buf = append(b.Buf, data[:sz]...) - data = data[sz:] - } -} - -// Size computes the size of a buffer by adding sizes of every chunk. -func (b *Buffer) Size() int { - size := len(b.Buf) - for _, buf := range b.bufs { - size += len(buf) - } - return size -} - -// DumpTo outputs the contents of a buffer to a writer and resets the buffer. 
-func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { - bufs := net.Buffers(b.bufs) - if len(b.Buf) > 0 { - bufs = append(bufs, b.Buf) - } - n, err := bufs.WriteTo(w) - - for _, buf := range b.bufs { - putBuf(buf) - } - putBuf(b.toPool) - - b.bufs = nil - b.Buf = nil - b.toPool = nil - - return int(n), err -} - -// BuildBytes creates a single byte slice with all the contents of the buffer. Data is -// copied if it does not fit in a single chunk. You can optionally provide one byte -// slice as argument that it will try to reuse. -func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { - if len(b.bufs) == 0 { - ret := b.Buf - b.toPool = nil - b.Buf = nil - return ret - } - - var ret []byte - size := b.Size() - - // If we got a buffer as argument and it is big enough, reuse it. - if len(reuse) == 1 && cap(reuse[0]) >= size { - ret = reuse[0][:0] - } else { - ret = make([]byte, 0, size) - } - for _, buf := range b.bufs { - ret = append(ret, buf...) - putBuf(buf) - } - - ret = append(ret, b.Buf...) - putBuf(b.toPool) - - b.bufs = nil - b.toPool = nil - b.Buf = nil - - return ret -} - -type readCloser struct { - offset int - bufs [][]byte -} - -func (r *readCloser) Read(p []byte) (n int, err error) { - for _, buf := range r.bufs { - // Copy as much as we can. - x := copy(p[n:], buf[r.offset:]) - n += x // Increment how much we filled. - - // Did we empty the whole buffer? - if r.offset+x == len(buf) { - // On to the next buffer. - r.offset = 0 - r.bufs = r.bufs[1:] - - // We can release this buffer. - putBuf(buf) - } else { - r.offset += x - } - - if n == len(p) { - break - } - } - // No buffers left or nothing read? - if len(r.bufs) == 0 { - err = io.EOF - } - return -} - -func (r *readCloser) Close() error { - // Release all remaining buffers. - for _, buf := range r.bufs { - putBuf(buf) - } - // In case Close gets called multiple times. - r.bufs = nil - - return nil -} - -// ReadCloser creates an io.ReadCloser with all the contents of the buffer. -func (b *Buffer) ReadCloser() io.ReadCloser { - ret := &readCloser{0, append(b.bufs, b.Buf)} - - b.bufs = nil - b.toPool = nil - b.Buf = nil - - return ret -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go deleted file mode 100644 index ff7b27c5b2..0000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go +++ /dev/null @@ -1,24 +0,0 @@ -// This file will only be included to the build if neither -// easyjson_nounsafe nor appengine build tag is set. See README notes -// for more details. - -//+build !easyjson_nounsafe -//+build !appengine - -package jlexer - -import ( - "reflect" - "unsafe" -) - -// bytesToStr creates a string pointing at the slice to avoid copying. -// -// Warning: the string returned by the function should be used with care, as the whole input data -// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data -// may be garbage-collected even when the string exists. 
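To make the chunk-chain design concrete: the zero-value Buffer is usable, appends grow the current chunk or pull a larger one from the size-class pools, and BuildBytes both flattens the chain and recycles the chunks. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/buffer"
)

func main() {
	// Optional: tune the pools before any serialization happens.
	buffer.Init(buffer.PoolConfig{StartSize: 256, PooledSize: 1024, MaxSize: 65536})

	var b buffer.Buffer // zero value is ready to use
	b.AppendString("hello, ")
	b.AppendString("world")
	b.AppendByte('!')

	fmt.Println(b.Size()) // 13, summed across all chunks

	// BuildBytes copies the chunks into one slice and returns them to the pool.
	fmt.Println(string(b.BuildBytes())) // hello, world!
}
```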
-func bytesToStr(data []byte) string { - h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) - shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} - return *(*string)(unsafe.Pointer(&shdr)) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go deleted file mode 100644 index 864d1be676..0000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go +++ /dev/null @@ -1,13 +0,0 @@ -// This file is included to the build if any of the buildtags below -// are defined. Refer to README notes for more details. - -//+build easyjson_nounsafe appengine - -package jlexer - -// bytesToStr creates a string normally from []byte -// -// Note that this method is roughly 1.5x slower than using the 'unsafe' method. -func bytesToStr(data []byte) string { - return string(data) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go deleted file mode 100644 index e90ec40d05..0000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/error.go +++ /dev/null @@ -1,15 +0,0 @@ -package jlexer - -import "fmt" - -// LexerError implements the error interface and represents all possible errors that can be -// generated during parsing the JSON data. -type LexerError struct { - Reason string - Offset int - Data string -} - -func (l *LexerError) Error() string { - return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go deleted file mode 100644 index b5f5e26132..0000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ /dev/null @@ -1,1244 +0,0 @@ -// Package jlexer contains a JSON lexer implementation. -// -// It is expected that it is mostly used with generated parser code, so the interface is tuned -// for a parser that knows what kind of data is expected. -package jlexer - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" - - "github.com/josharian/intern" -) - -// tokenKind determines type of a token. -type tokenKind byte - -const ( - tokenUndef tokenKind = iota // No token. - tokenDelim // Delimiter: one of '{', '}', '[' or ']'. - tokenString // A string literal, e.g. "abc\u1234" - tokenNumber // Number literal, e.g. 1.5e5 - tokenBool // Boolean literal: true or false. - tokenNull // null keyword. -) - -// token describes a single token: type, position in the input and value. -type token struct { - kind tokenKind // Type of a token. - - boolValue bool // Value if a boolean literal token. - byteValueCloned bool // true if byteValue was allocated and does not refer to original json body - byteValue []byte // Raw value of a token. - delimValue byte -} - -// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. -type Lexer struct { - Data []byte // Input data given to the lexer. - - start int // Start of the current token. - pos int // Current unscanned position in the input stream. - token token // Last scanned token, if token.kind != tokenUndef. - - firstElement bool // Whether current element is the first in array or an object. - wantSep byte // A comma or a colon character, which need to occur before a token. - - UseMultipleErrors bool // If we want to use multiple errors. 
- fatalError error // Fatal error occurred during lexing. It is usually a syntax error. - multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors. -} - -// FetchToken scans the input for the next token. -func (r *Lexer) FetchToken() { - r.token.kind = tokenUndef - r.start = r.pos - - // Check if r.Data has r.pos element - // If it doesn't, it mean corrupted input data - if len(r.Data) < r.pos { - r.errParse("Unexpected end of data") - return - } - // Determine the type of a token by skipping whitespace and reading the - // first character. - for _, c := range r.Data[r.pos:] { - switch c { - case ':', ',': - if r.wantSep == c { - r.pos++ - r.start++ - r.wantSep = 0 - } else { - r.errSyntax() - } - - case ' ', '\t', '\r', '\n': - r.pos++ - r.start++ - - case '"': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenString - r.fetchString() - return - - case '{', '[': - if r.wantSep != 0 { - r.errSyntax() - } - r.firstElement = true - r.token.kind = tokenDelim - r.token.delimValue = r.Data[r.pos] - r.pos++ - return - - case '}', ']': - if !r.firstElement && (r.wantSep != ',') { - r.errSyntax() - } - r.wantSep = 0 - r.token.kind = tokenDelim - r.token.delimValue = r.Data[r.pos] - r.pos++ - return - - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': - if r.wantSep != 0 { - r.errSyntax() - } - r.token.kind = tokenNumber - r.fetchNumber() - return - - case 'n': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenNull - r.fetchNull() - return - - case 't': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenBool - r.token.boolValue = true - r.fetchTrue() - return - - case 'f': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenBool - r.token.boolValue = false - r.fetchFalse() - return - - default: - r.errSyntax() - return - } - } - r.fatalError = io.EOF - return -} - -// isTokenEnd returns true if the char can follow a non-delimiter token -func isTokenEnd(c byte) bool { - return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' -} - -// fetchNull fetches and checks remaining bytes of null keyword. -func (r *Lexer) fetchNull() { - r.pos += 4 - if r.pos > len(r.Data) || - r.Data[r.pos-3] != 'u' || - r.Data[r.pos-2] != 'l' || - r.Data[r.pos-1] != 'l' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 4 - r.errSyntax() - } -} - -// fetchTrue fetches and checks remaining bytes of true keyword. -func (r *Lexer) fetchTrue() { - r.pos += 4 - if r.pos > len(r.Data) || - r.Data[r.pos-3] != 'r' || - r.Data[r.pos-2] != 'u' || - r.Data[r.pos-1] != 'e' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 4 - r.errSyntax() - } -} - -// fetchFalse fetches and checks remaining bytes of false keyword. -func (r *Lexer) fetchFalse() { - r.pos += 5 - if r.pos > len(r.Data) || - r.Data[r.pos-4] != 'a' || - r.Data[r.pos-3] != 'l' || - r.Data[r.pos-2] != 's' || - r.Data[r.pos-1] != 'e' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 5 - r.errSyntax() - } -} - -// fetchNumber scans a number literal token. -func (r *Lexer) fetchNumber() { - hasE := false - afterE := false - hasDot := false - - r.pos++ - for i, c := range r.Data[r.pos:] { - switch { - case c >= '0' && c <= '9': - afterE = false - case c == '.' 
&& !hasDot: - hasDot = true - case (c == 'e' || c == 'E') && !hasE: - hasE = true - hasDot = true - afterE = true - case (c == '+' || c == '-') && afterE: - afterE = false - default: - r.pos += i - if !isTokenEnd(c) { - r.errSyntax() - } else { - r.token.byteValue = r.Data[r.start:r.pos] - } - return - } - } - - r.pos = len(r.Data) - r.token.byteValue = r.Data[r.start:] -} - -// findStringLen tries to scan into the string literal for ending quote char to determine required size. -// The size will be exact if no escapes are present and may be inexact if there are escaped chars. -func findStringLen(data []byte) (isValid bool, length int) { - for { - idx := bytes.IndexByte(data, '"') - if idx == -1 { - return false, len(data) - } - if idx == 0 || (idx > 0 && data[idx-1] != '\\') { - return true, length + idx - } - - // count \\\\\\\ sequences. even number of slashes means quote is not really escaped - cnt := 1 - for idx-cnt-1 >= 0 && data[idx-cnt-1] == '\\' { - cnt++ - } - if cnt%2 == 0 { - return true, length + idx - } - - length += idx + 1 - data = data[idx+1:] - } -} - -// unescapeStringToken performs unescaping of string token. -// if no escaping is needed, original string is returned, otherwise - a new one allocated -func (r *Lexer) unescapeStringToken() (err error) { - data := r.token.byteValue - var unescapedData []byte - - for { - i := bytes.IndexByte(data, '\\') - if i == -1 { - break - } - - escapedRune, escapedBytes, err := decodeEscape(data[i:]) - if err != nil { - r.errParse(err.Error()) - return err - } - - if unescapedData == nil { - unescapedData = make([]byte, 0, len(r.token.byteValue)) - } - - var d [4]byte - s := utf8.EncodeRune(d[:], escapedRune) - unescapedData = append(unescapedData, data[:i]...) - unescapedData = append(unescapedData, d[:s]...) - - data = data[i+escapedBytes:] - } - - if unescapedData != nil { - r.token.byteValue = append(unescapedData, data...) - r.token.byteValueCloned = true - } - return -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - var val rune - for i := 2; i < len(s) && i < 6; i++ { - var v byte - c := s[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - v = c - '0' - case 'a', 'b', 'c', 'd', 'e', 'f': - v = c - 'a' + 10 - case 'A', 'B', 'C', 'D', 'E', 'F': - v = c - 'A' + 10 - default: - return -1 - } - - val <<= 4 - val |= rune(v) - } - return val -} - -// decodeEscape processes a single escape sequence and returns number of bytes processed. -func decodeEscape(data []byte) (decoded rune, bytesProcessed int, err error) { - if len(data) < 2 { - return 0, 0, errors.New("incorrect escape symbol \\ at the end of token") - } - - c := data[1] - switch c { - case '"', '/', '\\': - return rune(c), 2, nil - case 'b': - return '\b', 2, nil - case 'f': - return '\f', 2, nil - case 'n': - return '\n', 2, nil - case 'r': - return '\r', 2, nil - case 't': - return '\t', 2, nil - case 'u': - rr := getu4(data) - if rr < 0 { - return 0, 0, errors.New("incorrectly escaped \\uXXXX sequence") - } - - read := 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(data[read:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - read += 6 - rr = dec - } else { - rr = unicode.ReplacementChar - } - } - return rr, read, nil - } - - return 0, 0, errors.New("incorrectly escaped bytes") -} - -// fetchString scans a string literal token. 
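The subtle part of findStringLen above is deciding whether a quote is escaped: only an odd run of immediately preceding backslashes escapes it. The parity rule in isolation, as a standalone sketch:

```go
// isQuoteEscaped reports whether the '"' at data[idx] is escaped,
// using the same parity rule as the deleted findStringLen: count the
// backslashes directly before the quote; an odd count escapes it.
func isQuoteEscaped(data []byte, idx int) bool {
	cnt := 0
	for idx-cnt-1 >= 0 && data[idx-cnt-1] == '\\' {
		cnt++
	}
	return cnt%2 == 1 // `\"` is escaped, `\\"` is a real closing quote
}
```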
-func (r *Lexer) fetchString() { - r.pos++ - data := r.Data[r.pos:] - - isValid, length := findStringLen(data) - if !isValid { - r.pos += length - r.errParse("unterminated string literal") - return - } - r.token.byteValue = data[:length] - r.pos += length + 1 // skip closing '"' as well -} - -// scanToken scans the next token if no token is currently available in the lexer. -func (r *Lexer) scanToken() { - if r.token.kind != tokenUndef || r.fatalError != nil { - return - } - - r.FetchToken() -} - -// consume resets the current token to allow scanning the next one. -func (r *Lexer) consume() { - r.token.kind = tokenUndef - r.token.byteValueCloned = false - r.token.delimValue = 0 -} - -// Ok returns true if no error (including io.EOF) was encountered during scanning. -func (r *Lexer) Ok() bool { - return r.fatalError == nil -} - -const maxErrorContextLen = 13 - -func (r *Lexer) errParse(what string) { - if r.fatalError == nil { - var str string - if len(r.Data)-r.pos <= maxErrorContextLen { - str = string(r.Data) - } else { - str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." - } - r.fatalError = &LexerError{ - Reason: what, - Offset: r.pos, - Data: str, - } - } -} - -func (r *Lexer) errSyntax() { - r.errParse("syntax error") -} - -func (r *Lexer) errInvalidToken(expected string) { - if r.fatalError != nil { - return - } - if r.UseMultipleErrors { - r.pos = r.start - r.consume() - r.SkipRecursive() - switch expected { - case "[": - r.token.delimValue = ']' - r.token.kind = tokenDelim - case "{": - r.token.delimValue = '}' - r.token.kind = tokenDelim - } - r.addNonfatalError(&LexerError{ - Reason: fmt.Sprintf("expected %s", expected), - Offset: r.start, - Data: string(r.Data[r.start:r.pos]), - }) - return - } - - var str string - if len(r.token.byteValue) <= maxErrorContextLen { - str = string(r.token.byteValue) - } else { - str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." - } - r.fatalError = &LexerError{ - Reason: fmt.Sprintf("expected %s", expected), - Offset: r.pos, - Data: str, - } -} - -func (r *Lexer) GetPos() int { - return r.pos -} - -// Delim consumes a token and verifies that it is the given delimiter. -func (r *Lexer) Delim(c byte) { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - - if !r.Ok() || r.token.delimValue != c { - r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. - r.errInvalidToken(string([]byte{c})) - } else { - r.consume() - } -} - -// IsDelim returns true if there was no scanning error and next token is the given delimiter. -func (r *Lexer) IsDelim(c byte) bool { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - return !r.Ok() || r.token.delimValue == c -} - -// Null verifies that the next token is null and consumes it. -func (r *Lexer) Null() { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenNull { - r.errInvalidToken("null") - } - r.consume() -} - -// IsNull returns true if the next token is a null keyword. -func (r *Lexer) IsNull() bool { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - return r.Ok() && r.token.kind == tokenNull -} - -// Skip skips a single token. -func (r *Lexer) Skip() { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - r.consume() -} - -// SkipRecursive skips next array or object completely, or just skips a single token if not -// an array/object. -// -// Note: no syntax validation is performed on the skipped data. 
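Raw, defined just below, builds on SkipRecursive to slice a whole sub-value out of the input without decoding it; this is how generated code backs json.RawMessage fields. A hedged sketch:

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

func main() {
	l := jlexer.Lexer{Data: []byte(`{"meta":{"k":[1,2]},"id":7}`)}

	l.Delim('{')
	_ = l.UnsafeFieldName(false) // "meta"
	l.WantColon()

	raw := l.Raw() // captures {"k":[1,2]} verbatim
	fmt.Println(string(raw), l.Error())
}
```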
-func (r *Lexer) SkipRecursive() { - r.scanToken() - var start, end byte - startPos := r.start - - switch r.token.delimValue { - case '{': - start, end = '{', '}' - case '[': - start, end = '[', ']' - default: - r.consume() - return - } - - r.consume() - - level := 1 - inQuotes := false - wasEscape := false - - for i, c := range r.Data[r.pos:] { - switch { - case c == start && !inQuotes: - level++ - case c == end && !inQuotes: - level-- - if level == 0 { - r.pos += i + 1 - if !json.Valid(r.Data[startPos:r.pos]) { - r.pos = len(r.Data) - r.fatalError = &LexerError{ - Reason: "skipped array/object json value is invalid", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - } - } - return - } - case c == '\\' && inQuotes: - wasEscape = !wasEscape - continue - case c == '"' && inQuotes: - inQuotes = wasEscape - case c == '"': - inQuotes = true - } - wasEscape = false - } - r.pos = len(r.Data) - r.fatalError = &LexerError{ - Reason: "EOF reached while skipping array/object or token", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - } -} - -// Raw fetches the next item recursively as a data slice -func (r *Lexer) Raw() []byte { - r.SkipRecursive() - if !r.Ok() { - return nil - } - return r.Data[r.start:r.pos] -} - -// IsStart returns whether the lexer is positioned at the start -// of an input string. -func (r *Lexer) IsStart() bool { - return r.pos == 0 -} - -// Consumed reads all remaining bytes from the input, publishing an error if -// there is anything but whitespace remaining. -func (r *Lexer) Consumed() { - if r.pos > len(r.Data) || !r.Ok() { - return - } - - for _, c := range r.Data[r.pos:] { - if c != ' ' && c != '\t' && c != '\r' && c != '\n' { - r.AddError(&LexerError{ - Reason: "invalid character '" + string(c) + "' after top-level value", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - }) - return - } - - r.pos++ - r.start++ - } -} - -func (r *Lexer) unsafeString(skipUnescape bool) (string, []byte) { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "", nil - } - if !skipUnescape { - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return "", nil - } - } - - bytes := r.token.byteValue - ret := bytesToStr(r.token.byteValue) - r.consume() - return ret, bytes -} - -// UnsafeString returns the string value if the token is a string literal. -// -// Warning: returned string may point to the input buffer, so the string should not outlive -// the input buffer. Intended pattern of usage is as an argument to a switch statement. -func (r *Lexer) UnsafeString() string { - ret, _ := r.unsafeString(false) - return ret -} - -// UnsafeBytes returns the byte slice if the token is a string literal. -func (r *Lexer) UnsafeBytes() []byte { - _, ret := r.unsafeString(false) - return ret -} - -// UnsafeFieldName returns current member name string token -func (r *Lexer) UnsafeFieldName(skipUnescape bool) string { - ret, _ := r.unsafeString(skipUnescape) - return ret -} - -// String reads a string literal. 
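Putting the token API together: the loop below is essentially what easyjson-generated decoders emit, written by hand against the methods in this hunk (String and the numeric readers follow a little further down):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mailru/easyjson/jlexer"
)

func main() {
	l := jlexer.Lexer{Data: []byte(`{"name":"ko","count":3,"extra":[1,2]}`)}

	l.Delim('{')
	for !l.IsDelim('}') {
		key := l.UnsafeFieldName(false) // key must not outlive l.Data
		l.WantColon()
		switch key {
		case "name":
			fmt.Println("name:", l.String())
		case "count":
			fmt.Println("count:", l.Int())
		default:
			l.SkipRecursive() // ignore unknown fields
		}
		l.WantComma()
	}
	l.Delim('}')
	l.Consumed() // error out on trailing non-whitespace

	if err := l.Error(); err != nil {
		log.Fatal(err)
	}
}
```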
-func (r *Lexer) String() string { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "" - } - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return "" - } - var ret string - if r.token.byteValueCloned { - ret = bytesToStr(r.token.byteValue) - } else { - ret = string(r.token.byteValue) - } - r.consume() - return ret -} - -// StringIntern reads a string literal, and performs string interning on it. -func (r *Lexer) StringIntern() string { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "" - } - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return "" - } - ret := intern.Bytes(r.token.byteValue) - r.consume() - return ret -} - -// Bytes reads a string literal and base64 decodes it into a byte slice. -func (r *Lexer) Bytes() []byte { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return nil - } - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return nil - } - ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) - n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) - if err != nil { - r.fatalError = &LexerError{ - Reason: err.Error(), - } - return nil - } - - r.consume() - return ret[:n] -} - -// Bool reads a true or false boolean keyword. -func (r *Lexer) Bool() bool { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenBool { - r.errInvalidToken("bool") - return false - } - ret := r.token.boolValue - r.consume() - return ret -} - -func (r *Lexer) number() string { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenNumber { - r.errInvalidToken("number") - return "" - } - ret := bytesToStr(r.token.byteValue) - r.consume() - return ret -} - -func (r *Lexer) Uint8() uint8 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint8(n) -} - -func (r *Lexer) Uint16() uint16 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint16(n) -} - -func (r *Lexer) Uint32() uint32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint32(n) -} - -func (r *Lexer) Uint64() uint64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Uint() uint { - return uint(r.Uint64()) -} - -func (r *Lexer) Int8() int8 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int8(n) -} - -func (r *Lexer) Int16() int16 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := 
strconv.ParseInt(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int16(n) -} - -func (r *Lexer) Int32() int32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int32(n) -} - -func (r *Lexer) Int64() int64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Int() int { - return int(r.Int64()) -} - -func (r *Lexer) Uint8Str() uint8 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint8(n) -} - -func (r *Lexer) Uint16Str() uint16 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint16(n) -} - -func (r *Lexer) Uint32Str() uint32 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint32(n) -} - -func (r *Lexer) Uint64Str() uint64 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) UintStr() uint { - return uint(r.Uint64Str()) -} - -func (r *Lexer) UintptrStr() uintptr { - return uintptr(r.Uint64Str()) -} - -func (r *Lexer) Int8Str() int8 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int8(n) -} - -func (r *Lexer) Int16Str() int16 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int16(n) -} - -func (r *Lexer) Int32Str() int32 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int32(n) -} - -func (r *Lexer) Int64Str() int64 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) IntStr() int { - return int(r.Int64Str()) -} - -func (r *Lexer) Float32() float32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseFloat(s, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return float32(n) -} - -func (r *Lexer) Float32Str() float32 { - s, b := r.unsafeString(false) 
- if !r.Ok() { - return 0 - } - n, err := strconv.ParseFloat(s, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return float32(n) -} - -func (r *Lexer) Float64() float64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseFloat(s, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Float64Str() float64 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - n, err := strconv.ParseFloat(s, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) Error() error { - return r.fatalError -} - -func (r *Lexer) AddError(e error) { - if r.fatalError == nil { - r.fatalError = e - } -} - -func (r *Lexer) AddNonFatalError(e error) { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Data: string(r.Data[r.start:r.pos]), - Reason: e.Error(), - }) -} - -func (r *Lexer) addNonfatalError(err *LexerError) { - if r.UseMultipleErrors { - // We don't want to add errors with the same offset. - if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { - return - } - r.multipleErrors = append(r.multipleErrors, err) - return - } - r.fatalError = err -} - -func (r *Lexer) GetNonFatalErrors() []*LexerError { - return r.multipleErrors -} - -// JsonNumber fetches and json.Number from 'encoding/json' package. -// Both int, float or string, contains them are valid values -func (r *Lexer) JsonNumber() json.Number { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() { - r.errInvalidToken("json.Number") - return json.Number("") - } - - switch r.token.kind { - case tokenString: - return json.Number(r.String()) - case tokenNumber: - return json.Number(r.Raw()) - case tokenNull: - r.Null() - return json.Number("") - default: - r.errSyntax() - return json.Number("") - } -} - -// Interface fetches an interface{} analogous to the 'encoding/json' package. -func (r *Lexer) Interface() interface{} { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - - if !r.Ok() { - return nil - } - switch r.token.kind { - case tokenString: - return r.String() - case tokenNumber: - return r.Float64() - case tokenBool: - return r.Bool() - case tokenNull: - r.Null() - return nil - } - - if r.token.delimValue == '{' { - r.consume() - - ret := map[string]interface{}{} - for !r.IsDelim('}') { - key := r.String() - r.WantColon() - ret[key] = r.Interface() - r.WantComma() - } - r.Delim('}') - - if r.Ok() { - return ret - } else { - return nil - } - } else if r.token.delimValue == '[' { - r.consume() - - ret := []interface{}{} - for !r.IsDelim(']') { - ret = append(ret, r.Interface()) - r.WantComma() - } - r.Delim(']') - - if r.Ok() { - return ret - } else { - return nil - } - } - r.errSyntax() - return nil -} - -// WantComma requires a comma to be present before fetching next token. -func (r *Lexer) WantComma() { - r.wantSep = ',' - r.firstElement = false -} - -// WantColon requires a colon to be present before fetching next token. 
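Interface and JsonNumber above give the lexer a generic mode analogous to encoding/json: objects become map[string]interface{}, arrays []interface{}, and numbers float64. A brief sketch:

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

func main() {
	l := jlexer.Lexer{Data: []byte(`{"a": [1, "x", true, null]}`)}

	v := l.Interface() // map with a nested slice; 1 decodes as float64
	fmt.Println(v, l.Error())
}
```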
-func (r *Lexer) WantColon() { - r.wantSep = ':' - r.firstElement = false -} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go deleted file mode 100644 index 2c5b20105b..0000000000 --- a/vendor/github.com/mailru/easyjson/jwriter/writer.go +++ /dev/null @@ -1,405 +0,0 @@ -// Package jwriter contains a JSON writer. -package jwriter - -import ( - "io" - "strconv" - "unicode/utf8" - - "github.com/mailru/easyjson/buffer" -) - -// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but -// Flags field in Writer is used to set and pass them around. -type Flags int - -const ( - NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'. - NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'. -) - -// Writer is a JSON writer. -type Writer struct { - Flags Flags - - Error error - Buffer buffer.Buffer - NoEscapeHTML bool -} - -// Size returns the size of the data that was written out. -func (w *Writer) Size() int { - return w.Buffer.Size() -} - -// DumpTo outputs the data to given io.Writer, resetting the buffer. -func (w *Writer) DumpTo(out io.Writer) (written int, err error) { - return w.Buffer.DumpTo(out) -} - -// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice -// as argument that it will try to reuse. -func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) { - if w.Error != nil { - return nil, w.Error - } - - return w.Buffer.BuildBytes(reuse...), nil -} - -// ReadCloser returns an io.ReadCloser that can be used to read the data. -// ReadCloser also resets the buffer. -func (w *Writer) ReadCloser() (io.ReadCloser, error) { - if w.Error != nil { - return nil, w.Error - } - - return w.Buffer.ReadCloser(), nil -} - -// RawByte appends raw binary data to the buffer. -func (w *Writer) RawByte(c byte) { - w.Buffer.AppendByte(c) -} - -// RawByte appends raw binary data to the buffer. -func (w *Writer) RawString(s string) { - w.Buffer.AppendString(s) -} - -// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for -// calling with results of MarshalJSON-like functions. -func (w *Writer) Raw(data []byte, err error) { - switch { - case w.Error != nil: - return - case err != nil: - w.Error = err - case len(data) > 0: - w.Buffer.AppendBytes(data) - default: - w.RawString("null") - } -} - -// RawText encloses raw binary data in quotes and appends in to the buffer. -// Useful for calling with results of MarshalText-like functions. 
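The writer side mirrors the lexer: generated encoders chain these primitives onto the pooled buffer and pull the result out once at the end. A minimal hand-written sketch using the methods above plus the Int64/String helpers that follow:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mailru/easyjson/jwriter"
)

func main() {
	var w jwriter.Writer // zero value writes into the pooled buffer

	w.RawByte('{')
	w.RawString(`"id":`)
	w.Int64(42)
	w.RawString(`,"name":`)
	w.String(`a"b`) // escaped on output
	w.RawByte('}')

	buf, err := w.BuildBytes()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(buf)) // {"id":42,"name":"a\"b"}
}
```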
-func (w *Writer) RawText(data []byte, err error) { - switch { - case w.Error != nil: - return - case err != nil: - w.Error = err - case len(data) > 0: - w.String(string(data)) - default: - w.RawString("null") - } -} - -// Base64Bytes appends data to the buffer after base64 encoding it -func (w *Writer) Base64Bytes(data []byte) { - if data == nil { - w.Buffer.AppendString("null") - return - } - w.Buffer.AppendByte('"') - w.base64(data) - w.Buffer.AppendByte('"') -} - -func (w *Writer) Uint8(n uint8) { - w.Buffer.EnsureSpace(3) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint16(n uint16) { - w.Buffer.EnsureSpace(5) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint32(n uint32) { - w.Buffer.EnsureSpace(10) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint(n uint) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint64(n uint64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) -} - -func (w *Writer) Int8(n int8) { - w.Buffer.EnsureSpace(4) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int16(n int16) { - w.Buffer.EnsureSpace(6) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int32(n int32) { - w.Buffer.EnsureSpace(11) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int(n int) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int64(n int64) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) -} - -func (w *Writer) Uint8Str(n uint8) { - w.Buffer.EnsureSpace(3) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint16Str(n uint16) { - w.Buffer.EnsureSpace(5) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint32Str(n uint32) { - w.Buffer.EnsureSpace(10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) UintStr(n uint) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint64Str(n uint64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) UintptrStr(n uintptr) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int8Str(n int8) { - w.Buffer.EnsureSpace(4) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int16Str(n int16) { - w.Buffer.EnsureSpace(6) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int32Str(n int32) 
{ - w.Buffer.EnsureSpace(11) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) IntStr(n int) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int64Str(n int64) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Float32(n float32) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) -} - -func (w *Writer) Float32Str(n float32) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Float64(n float64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) -} - -func (w *Writer) Float64Str(n float64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Bool(v bool) { - w.Buffer.EnsureSpace(5) - if v { - w.Buffer.Buf = append(w.Buffer.Buf, "true"...) - } else { - w.Buffer.Buf = append(w.Buffer.Buf, "false"...) - } -} - -const chars = "0123456789abcdef" - -func getTable(falseValues ...int) [128]bool { - table := [128]bool{} - - for i := 0; i < 128; i++ { - table[i] = true - } - - for _, v := range falseValues { - table[v] = false - } - - return table -} - -var ( - htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') - htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') -) - -func (w *Writer) String(s string) { - w.Buffer.AppendByte('"') - - // Portions of the string that contain no escapes are appended as - // byte slices. 
- - p := 0 // last non-escape symbol - - escapeTable := &htmlEscapeTable - if w.NoEscapeHTML { - escapeTable = &htmlNoEscapeTable - } - - for i := 0; i < len(s); { - c := s[i] - - if c < utf8.RuneSelf { - if escapeTable[c] { - // single-width character, no escaping is required - i++ - continue - } - - w.Buffer.AppendString(s[p:i]) - switch c { - case '\t': - w.Buffer.AppendString(`\t`) - case '\r': - w.Buffer.AppendString(`\r`) - case '\n': - w.Buffer.AppendString(`\n`) - case '\\': - w.Buffer.AppendString(`\\`) - case '"': - w.Buffer.AppendString(`\"`) - default: - w.Buffer.AppendString(`\u00`) - w.Buffer.AppendByte(chars[c>>4]) - w.Buffer.AppendByte(chars[c&0xf]) - } - - i++ - p = i - continue - } - - // broken utf - runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) - if runeValue == utf8.RuneError && runeWidth == 1 { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\ufffd`) - i++ - p = i - continue - } - - // jsonp stuff - tab separator and line separator - if runeValue == '\u2028' || runeValue == '\u2029' { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\u202`) - w.Buffer.AppendByte(chars[runeValue&0xf]) - i += runeWidth - p = i - continue - } - i += runeWidth - } - w.Buffer.AppendString(s[p:]) - w.Buffer.AppendByte('"') -} - -const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" -const padChar = '=' - -func (w *Writer) base64(in []byte) { - - if len(in) == 0 { - return - } - - w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4) - - si := 0 - n := (len(in) / 3) * 3 - - for si < n { - // Convert 3x 8bit source bytes into 4 bytes - val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2]) - - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F]) - - si += 3 - } - - remain := len(in) - si - if remain == 0 { - return - } - - // Add the remaining small block - val := uint(in[si+0]) << 16 - if remain == 2 { - val |= uint(in[si+1]) << 8 - } - - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F]) - - switch remain { - case 2: - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar)) - case 1: - w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar)) - } -} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE deleted file mode 100644 index 65dc692b6b..0000000000 --- a/vendor/github.com/mattn/go-isatty/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) Yasuhiro MATSUMOTO - -MIT License (Expat) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md deleted file mode 100644 index 38418353e3..0000000000 --- a/vendor/github.com/mattn/go-isatty/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# go-isatty - -[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) -[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) -[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) -[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) - -isatty for golang - -## Usage - -```go -package main - -import ( - "fmt" - "github.com/mattn/go-isatty" - "os" -) - -func main() { - if isatty.IsTerminal(os.Stdout.Fd()) { - fmt.Println("Is Terminal") - } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { - fmt.Println("Is Cygwin/MSYS2 Terminal") - } else { - fmt.Println("Is Not Terminal") - } -} -``` - -## Installation - -``` -$ go get github.com/mattn/go-isatty -``` - -## License - -MIT - -## Author - -Yasuhiro Matsumoto (a.k.a mattn) - -## Thanks - -* k-takata: base idea for IsCygwinTerminal - - https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go deleted file mode 100644 index 17d4f90ebc..0000000000 --- a/vendor/github.com/mattn/go-isatty/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package isatty implements interface to isatty -package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh deleted file mode 100644 index 012162b077..0000000000 --- a/vendor/github.com/mattn/go-isatty/go.test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list ./... | grep -v vendor); do - go test -race -coverprofile=profile.out -covermode=atomic "$d" - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go deleted file mode 100644 index d0ea68f408..0000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine && !tinygo -// +build darwin freebsd openbsd netbsd dragonfly hurd -// +build !appengine -// +build !tinygo - -package isatty - -import "golang.org/x/sys/unix" - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) - return err == nil -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. 
-func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go deleted file mode 100644 index 7402e0618a..0000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build (appengine || js || nacl || tinygo || wasm) && !windows -// +build appengine js nacl tinygo wasm -// +build !windows - -package isatty - -// IsTerminal returns true if the file descriptor is terminal which -// is always false on js and appengine classic which is a sandboxed PaaS. -func IsTerminal(fd uintptr) bool { - return false -} - -// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go deleted file mode 100644 index bae7f9bb3d..0000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_plan9.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build plan9 -// +build plan9 - -package isatty - -import ( - "syscall" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - path, err := syscall.Fd2path(int(fd)) - if err != nil { - return false - } - return path == "/dev/cons" || path == "/mnt/term/dev/cons" -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go deleted file mode 100644 index 0c3acf2dc2..0000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build solaris && !appengine -// +build solaris,!appengine - -package isatty - -import ( - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c -func IsTerminal(fd uintptr) bool { - _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA) - return err == nil -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go deleted file mode 100644 index 0337d8cf6d..0000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build (linux || aix || zos) && !appengine && !tinygo -// +build linux aix zos -// +build !appengine -// +build !tinygo - -package isatty - -import "golang.org/x/sys/unix" - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) - return err == nil -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. 
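> Editor's note (not part of the diff): every platform variant of go-isatty deleted here exposes the same two predicates, `IsTerminal` and `IsCygwinTerminal`. A minimal sketch of the usual call site, gating ANSI colour on a real terminal; it assumes the canonical import path rather than the vendored `github.com` mirror spelling:

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	// Only emit colour codes when stdout is a real terminal. The Cygwin check
	// matters on Windows, where Cygwin/MSYS2 ptys look like pipes.
	useColor := isatty.IsTerminal(os.Stdout.Fd()) ||
		isatty.IsCygwinTerminal(os.Stdout.Fd())
	if useColor {
		fmt.Println("\x1b[32mok\x1b[0m")
	} else {
		fmt.Println("ok")
	}
}
```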
-func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go deleted file mode 100644 index 8e3c99171b..0000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_windows.go +++ /dev/null @@ -1,125 +0,0 @@ -//go:build windows && !appengine -// +build windows,!appengine - -package isatty - -import ( - "errors" - "strings" - "syscall" - "unicode/utf16" - "unsafe" -) - -const ( - objectNameInfo uintptr = 1 - fileNameInfo = 2 - fileTypePipe = 3 -) - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - ntdll = syscall.NewLazyDLL("ntdll.dll") - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") - procGetFileType = kernel32.NewProc("GetFileType") - procNtQueryObject = ntdll.NewProc("NtQueryObject") -) - -func init() { - // Check if GetFileInformationByHandleEx is available. - if procGetFileInformationByHandleEx.Find() != nil { - procGetFileInformationByHandleEx = nil - } -} - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} - -// Check pipe name is used for cygwin/msys2 pty. -// Cygwin/MSYS2 PTY has a name like: -// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master -func isCygwinPipeName(name string) bool { - token := strings.Split(name, "-") - if len(token) < 5 { - return false - } - - if token[0] != `\msys` && - token[0] != `\cygwin` && - token[0] != `\Device\NamedPipe\msys` && - token[0] != `\Device\NamedPipe\cygwin` { - return false - } - - if token[1] == "" { - return false - } - - if !strings.HasPrefix(token[2], "pty") { - return false - } - - if token[3] != `from` && token[3] != `to` { - return false - } - - if token[4] != "master" { - return false - } - - return true -} - -// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler -// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion -// guys are using Windows XP, this is a workaround for those guys, it will also work on system from -// Windows vista to 10 -// see https://stackoverflow.com/a/18792477 for details -func getFileNameByHandle(fd uintptr) (string, error) { - if procNtQueryObject == nil { - return "", errors.New("ntdll.dll: NtQueryObject not supported") - } - - var buf [4 + syscall.MAX_PATH]uint16 - var result int - r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, - fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) - if r != 0 { - return "", e - } - return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil -} - -// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 -// terminal. -func IsCygwinTerminal(fd uintptr) bool { - if procGetFileInformationByHandleEx == nil { - name, err := getFileNameByHandle(fd) - if err != nil { - return false - } - return isCygwinPipeName(name) - } - - // Cygwin/msys's pty is a pipe. 
- ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) - if ft != fileTypePipe || e != 0 { - return false - } - - var buf [2 + syscall.MAX_PATH]uint16 - r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), - 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), - uintptr(len(buf)*2), 0, 0) - if r == 0 || e != 0 { - return false - } - - l := *(*uint32)(unsafe.Pointer(&buf)) - return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) -} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE deleted file mode 100644 index f9c841a51e..0000000000 --- a/vendor/github.com/mitchellh/go-homedir/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md deleted file mode 100644 index d70706d5b3..0000000000 --- a/vendor/github.com/mitchellh/go-homedir/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# go-homedir - -This is a Go library for detecting the user's home directory without -the use of cgo, so the library can be used in cross-compilation environments. - -Usage is incredibly simple, just call `homedir.Dir()` to get the home directory -for a user, and `homedir.Expand()` to expand the `~` in a path to the home -directory. - -**Why not just use `os/user`?** The built-in `os/user` package requires -cgo on Darwin systems. This means that any Go code that uses that package -cannot cross compile. But 99% of the time the use for `os/user` is just to -retrieve the home directory, which we can do for the current user without -cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go deleted file mode 100644 index 25378537ea..0000000000 --- a/vendor/github.com/mitchellh/go-homedir/homedir.go +++ /dev/null @@ -1,167 +0,0 @@ -package homedir - -import ( - "bytes" - "errors" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" -) - -// DisableCache will disable caching of the home directory. Caching is enabled -// by default. -var DisableCache bool - -var homedirCache string -var cacheLock sync.RWMutex - -// Dir returns the home directory for the executing user. -// -// This uses an OS-specific method for discovering the home directory. 
-// An error is returned if a home directory cannot be detected. -func Dir() (string, error) { - if !DisableCache { - cacheLock.RLock() - cached := homedirCache - cacheLock.RUnlock() - if cached != "" { - return cached, nil - } - } - - cacheLock.Lock() - defer cacheLock.Unlock() - - var result string - var err error - if runtime.GOOS == "windows" { - result, err = dirWindows() - } else { - // Unix-like system, so just assume Unix - result, err = dirUnix() - } - - if err != nil { - return "", err - } - homedirCache = result - return result, nil -} - -// Expand expands the path to include the home directory if the path -// is prefixed with `~`. If it isn't prefixed with `~`, the path is -// returned as-is. -func Expand(path string) (string, error) { - if len(path) == 0 { - return path, nil - } - - if path[0] != '~' { - return path, nil - } - - if len(path) > 1 && path[1] != '/' && path[1] != '\\' { - return "", errors.New("cannot expand user-specific home dir") - } - - dir, err := Dir() - if err != nil { - return "", err - } - - return filepath.Join(dir, path[1:]), nil -} - -// Reset clears the cache, forcing the next call to Dir to re-detect -// the home directory. This generally never has to be called, but can be -// useful in tests if you're modifying the home directory via the HOME -// env var or something. -func Reset() { - cacheLock.Lock() - defer cacheLock.Unlock() - homedirCache = "" -} - -func dirUnix() (string, error) { - homeEnv := "HOME" - if runtime.GOOS == "plan9" { - // On plan9, env vars are lowercase. - homeEnv = "home" - } - - // First prefer the HOME environmental variable - if home := os.Getenv(homeEnv); home != "" { - return home, nil - } - - var stdout bytes.Buffer - - // If that fails, try OS specific commands - if runtime.GOOS == "darwin" { - cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) - cmd.Stdout = &stdout - if err := cmd.Run(); err == nil { - result := strings.TrimSpace(stdout.String()) - if result != "" { - return result, nil - } - } - } else { - cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
- if err != exec.ErrNotFound { - return "", err - } - } else { - if passwd := strings.TrimSpace(stdout.String()); passwd != "" { - // username:password:uid:gid:gecos:home:shell - passwdParts := strings.SplitN(passwd, ":", 7) - if len(passwdParts) > 5 { - return passwdParts[5], nil - } - } - } - } - - // If all else fails, try the shell - stdout.Reset() - cmd := exec.Command("sh", "-c", "cd && pwd") - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return "", err - } - - result := strings.TrimSpace(stdout.String()) - if result == "" { - return "", errors.New("blank output when reading home directory") - } - - return result, nil -} - -func dirWindows() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - // Prefer standard environment variable USERPROFILE - if home := os.Getenv("USERPROFILE"); home != "" { - return home, nil - } - - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home := drive + path - if drive == "" || path == "" { - return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") - } - - return home, nil -} diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md deleted file mode 100644 index c758234904..0000000000 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ /dev/null @@ -1,96 +0,0 @@ -## 1.5.0 - -* New option `IgnoreUntaggedFields` to ignore decoding to any fields - without `mapstructure` (or the configured tag name) set [GH-277] -* New option `ErrorUnset` which makes it an error if any fields - in a target struct are not set by the decoding process. [GH-225] -* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] -* Decoding to slice from array no longer crashes [GH-265] -* Decode nested struct pointers to map [GH-271] -* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] -* Fix issue where fields with `,omitempty` would sometimes decode - into a map with an empty string key [GH-281] - -## 1.4.3 - -* Fix cases where `json.Number` didn't decode properly [GH-261] - -## 1.4.2 - -* Custom name matchers to support any sort of casing, formatting, etc. for - field names. [GH-250] -* Fix possible panic in ComposeDecodeHookFunc [GH-251] - -## 1.4.1 - -* Fix regression where `*time.Time` value would be set to empty and not be sent - to decode hooks properly [GH-232] - -## 1.4.0 - -* A new decode hook type `DecodeHookFuncValue` has been added that has - access to the full values. [GH-183] -* Squash is now supported with embedded fields that are struct pointers [GH-205] -* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] - -## 1.3.3 - -* Decoding maps from maps creates a settable value for decode hooks [GH-203] - -## 1.3.2 - -* Decode into interface type with a struct value is supported [GH-187] - -## 1.3.1 - -* Squash should only squash embedded structs. [GH-194] - -## 1.3.0 - -* Added `",omitempty"` support. This will ignore zero values in the source - structure when encoding. [GH-145] - -## 1.2.3 - -* Fix duplicate entries in Keys list with pointer values. [GH-185] - -## 1.2.2 - -* Do not add unsettable (unexported) values to the unused metadata key - or "remain" value. [GH-150] - -## 1.2.1 - -* Go modules checksum mismatch fix - -## 1.2.0 - -* Added support to capture unused values in a field using the `",remain"` value - in the mapstructure tag. 
There is an example to showcase usage. -* Added `DecoderConfig` option to always squash embedded structs -* `json.Number` can decode into `uint` types -* Empty slices are preserved and not replaced with nil slices -* Fix panic that can occur in when decoding a map into a nil slice of structs -* Improved package documentation for godoc - -## 1.1.2 - -* Fix error when decode hook decodes interface implementation into interface - type. [GH-140] - -## 1.1.1 - -* Fix panic that can happen in `decodePtr` - -## 1.1.0 - -* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] -* Support struct to struct decoding [GH-137] -* If source map value is nil, then destination map value is nil (instead of empty) -* If source slice value is nil, then destination slice value is nil (instead of empty) -* If source pointer is nil, then destination pointer is set to nil (instead of - allocated zero value of type) - -## 1.0.0 - -* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE deleted file mode 100644 index f9c841a51e..0000000000 --- a/vendor/github.com/mitchellh/mapstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 0018dc7d9f..0000000000 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). 
- -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index 3a754ca724..0000000000 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,279 +0,0 @@ -package mapstructure - -import ( - "encoding" - "errors" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - var f3 DecodeHookFuncValue - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2, f3} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Value, to reflect.Value) (interface{}, error) { - - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from.Type(), to.Type(), from.Interface()) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), from.Interface()) - case DecodeHookFuncValue: - return f(from, to) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. -func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - var err error - data := f.Interface() - - newFrom := f - for _, f1 := range fs { - data, err = DecodeHookExec(f1, newFrom, t) - if err != nil { - return nil, err - } - newFrom = reflect.ValueOf(data) - } - - return data, nil - } -} - -// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. -// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. 
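> Editor's note (not part of the diff): the mapstructure README's "two passes" argument is easy to see in code. A minimal sketch of the pattern it describes: decode once into a generic `map[string]interface{}`, branch on the `"type"` key, then let mapstructure fill the concrete struct. The `Person` type here is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
}

func main() {
	// In real use this map would come from json.Unmarshal into interface{};
	// a literal keeps the sketch self-contained.
	input := map[string]interface{}{"type": "person", "name": "Mitchell"}

	switch input["type"] {
	case "person":
		var p Person
		if err := mapstructure.Decode(input, &p); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%+v\n", p) // {Name:Mitchell}
	}
}
```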
-func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { - return func(a, b reflect.Value) (interface{}, error) { - var allErrs string - var out interface{} - var err error - - for _, f := range ff { - out, err = DecodeHookExec(f, a, b) - if err != nil { - allErrs += err.Error() + "\n" - continue - } - - return out, nil - } - - return nil, errors.New(allErrs) - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. -func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -// StringToIPHookFunc returns a DecodeHookFunc that converts -// strings to net.IP -func StringToIPHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IP{}) { - return data, nil - } - - // Convert it by parsing - ip := net.ParseIP(data.(string)) - if ip == nil { - return net.IP{}, fmt.Errorf("failed parsing ip %v", data) - } - - return ip, nil - } -} - -// StringToIPNetHookFunc returns a DecodeHookFunc that converts -// strings to net.IPNet -func StringToIPNetHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IPNet{}) { - return data, nil - } - - // Convert it by parsing - _, net, err := net.ParseCIDR(data.(string)) - return net, err - } -} - -// StringToTimeHookFunc returns a DecodeHookFunc that converts -// strings to time.Time. -func StringToTimeHookFunc(layout string) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Time{}) { - return data, nil - } - - // Convert it by parsing - return time.Parse(layout, data.(string)) - } -} - -// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to -// the decoder. -// -// Note that this is significantly different from the WeaklyTypedInput option -// of the DecoderConfig. 
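> Editor's note (not part of the diff): `ComposeDecodeHookFunc` chains hooks left to right, feeding each the previous result, and the stock `StringTo...HookFunc` helpers only fire when the source kind is string and the target type matches, so composing them is safe. A minimal sketch wiring two of them into a `DecoderConfig`; the `Config` struct is illustrative:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Timeout time.Duration
	Tags    []string
}

func main() {
	var cfg Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &cfg,
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(map[string]interface{}{
		"timeout": "1m30s",
		"tags":    "a,b,c",
	}); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {Timeout:1m30s Tags:[a b c]}
}
```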
-func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } - return "0", nil - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} - -func RecursiveStructToMapHookFunc() DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - if f.Kind() != reflect.Struct { - return f.Interface(), nil - } - - var i interface{} = struct{}{} - if t.Type() != reflect.TypeOf(&i).Elem() { - return f.Interface(), nil - } - - m := make(map[string]interface{}) - t.Set(reflect.ValueOf(m)) - - return f.Interface(), nil - } -} - -// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies -// strings to the UnmarshalText function, when the target type -// implements the encoding.TextUnmarshaler interface -func TextUnmarshallerHookFunc() DecodeHookFuncType { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - result := reflect.New(t).Interface() - unmarshaller, ok := result.(encoding.TextUnmarshaler) - if !ok { - return data, nil - } - if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { - return nil, err - } - return result, nil - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5af3..0000000000 --- a/vendor/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represents multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) 
- default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go deleted file mode 100644 index 1efb22ac36..0000000000 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ /dev/null @@ -1,1540 +0,0 @@ -// Package mapstructure exposes functionality to convert one arbitrary -// Go type into another, typically to convert a map[string]interface{} -// into a native Go structure. -// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -// -// The simplest function to start with is Decode. -// -// Field Tags -// -// When decoding to a struct, mapstructure will use the field name by -// default to perform the mapping. For example, if a struct has a field -// "Username" then mapstructure will look for a key in the source value -// of "username" (case insensitive). -// -// type User struct { -// Username string -// } -// -// You can change the behavior of mapstructure by using struct tags. -// The default struct tag that mapstructure looks for is "mapstructure" -// but you can customize it using DecoderConfig. -// -// Renaming Fields -// -// To rename the key that mapstructure looks for, use the "mapstructure" -// tag and set a value directly. For example, to change the "username" example -// above to "user": -// -// type User struct { -// Username string `mapstructure:"user"` -// } -// -// Embedded Structs and Squashing -// -// Embedded structs are treated as if they're another field with that name. -// By default, the two structs below are equivalent when decoding with -// mapstructure: -// -// type Person struct { -// Name string -// } -// -// type Friend struct { -// Person -// } -// -// type Friend struct { -// Person Person -// } -// -// This would require an input that looks like below: -// -// map[string]interface{}{ -// "person": map[string]interface{}{"name": "alice"}, -// } -// -// If your "person" value is NOT nested, then you can append ",squash" to -// your tag value and mapstructure will treat it as if the embedded struct -// were part of the struct directly. Example: -// -// type Friend struct { -// Person `mapstructure:",squash"` -// } -// -// Now the following input would be accepted: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// When decoding from a struct to a map, the squash tag squashes the struct -// fields into a single map. Using the example structs from above: -// -// Friend{Person: Person{Name: "alice"}} -// -// Will be decoded into a map: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// DecoderConfig has a field that changes the behavior of mapstructure -// to always squash embedded structs. -// -// Remainder Values -// -// If there are any unmapped keys in the source value, mapstructure by -// default will silently ignore them. You can error by setting ErrorUnused -// in DecoderConfig. If you're using Metadata you can also maintain a slice -// of the unused keys. -// -// You can also use the ",remain" suffix on your tag to collect all unused -// values in a map. The field with this tag MUST be a map type and should -// probably be a "map[string]interface{}" or "map[interface{}]interface{}". 
-// See example below: -// -// type Friend struct { -// Name string -// Other map[string]interface{} `mapstructure:",remain"` -// } -// -// Given the input below, Other would be populated with the other -// values that weren't used (everything but "name"): -// -// map[string]interface{}{ -// "name": "bob", -// "address": "123 Maple St.", -// } -// -// Omit Empty Values -// -// When decoding from a struct to any other value, you may use the -// ",omitempty" suffix on your tag to omit that value if it equates to -// the zero value. The zero value of all types is specified in the Go -// specification. -// -// For example, the zero type of a numeric type is zero ("0"). If the struct -// field value is zero and a numeric type, the field is empty, and it won't -// be encoded into the destination type. -// -// type Source struct { -// Age int `mapstructure:",omitempty"` -// } -// -// Unexported fields -// -// Since unexported (private) struct fields cannot be set outside the package -// where they are defined, the decoder will simply skip them. -// -// For this output type definition: -// -// type Exported struct { -// private string // this unexported field will be skipped -// Public string -// } -// -// Using this map as input: -// -// map[string]interface{}{ -// "private": "I will be ignored", -// "Public": "I made it through!", -// } -// -// The following struct will be decoded: -// -// type Exported struct { -// private: "" // field is left with an empty string (zero value) -// Public: "I made it through!" -// } -// -// Other Configuration -// -// mapstructure is highly configurable. See the DecoderConfig struct -// for other features and options that are supported. -package mapstructure - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// DecodeHookFunc is the callback function that can be used for -// data transformations. See "DecodeHook" in the DecoderConfig -// struct. -// -// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or -// DecodeHookFuncValue. -// Values are a superset of Types (Values can return types), and Types are a -// superset of Kinds (Types can return Kinds) and are generally a richer thing -// to use, but Kinds are simpler if you only need those. -// -// The reason DecodeHookFunc is multi-typed is for backwards compatibility: -// we started with Kinds and then realized Types were the better solution, -// but have a promise to not break backwards compat so we now support -// both. -type DecodeHookFunc interface{} - -// DecodeHookFuncType is a DecodeHookFunc which has complete information about -// the source and target types. -type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) - -// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the -// source and target types. -type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) - -// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target -// values. -type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) - -// DecoderConfig is the configuration that is used to create a new decoder -// and allows customization of various aspects of decoding. -type DecoderConfig struct { - // DecodeHook, if set, will be called before any decoding and any - // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. 
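> Editor's note (not part of the diff): the `,squash` and `,remain` sections of the package doc above compose naturally. A minimal sketch reusing the doc comment's `alice` input: the embedded `Person` absorbs `"name"` while every unmatched key lands in the `,remain` map:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
}

type Friend struct {
	Person `mapstructure:",squash"`
	// ",remain" fields must be maps; unused keys collect here.
	Other map[string]interface{} `mapstructure:",remain"`
}

func main() {
	var f Friend
	if err := mapstructure.Decode(map[string]interface{}{
		"name":    "alice",
		"address": "123 Maple St.",
	}, &f); err != nil {
		log.Fatal(err)
	}
	fmt.Println(f.Name, f.Other["address"]) // alice 123 Maple St.
}
```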
The - // DecodeHook is called for every map and value in the input. This means - // that if a struct has embedded fields with squash tags the decode hook - // is called only once with all of the input data, not once for each - // embedded struct. - // - // If an error is returned, the entire decode will fail with that error. - DecodeHook DecodeHookFunc - - // If ErrorUnused is true, then it is an error for there to exist - // keys in the original map that were unused in the decoding process - // (extra keys). - ErrorUnused bool - - // If ErrorUnset is true, then it is an error for there to exist - // fields in the result that were not set in the decoding process - // (extra fields). This only applies to decoding to a struct. This - // will affect all nested structs as well. - ErrorUnset bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - slice of maps to a merged map - // - single values are converted to slices if required. Each - // element is weakly decoded. For example: "4" can become []int{4} - // if the target type is an int slice. - // - WeaklyTypedInput bool - - // Squash will squash embedded structs. A squash tag may also be - // added to an individual struct field using a tag. For example: - // - // type Parent struct { - // Child `mapstructure:",squash"` - // } - Squash bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string - - // IgnoreUntaggedFields ignores all struct fields without explicit - // TagName, comparable to `mapstructure:"-"` as default behaviour. - IgnoreUntaggedFields bool - - // MatchName is the function used to match the map key to the struct - // field name or tag. Defaults to `strings.EqualFold`. This can be used - // to implement case-sensitive tag values, support snake casing, etc. - MatchName func(mapKey, fieldName string) bool -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. 
-type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string - - // Unset is a slice of field names that were found in the result interface - // but weren't set in the decoding process since there was no matching value - // in the input - Unset []string -} - -// Decode takes an input structure and uses reflection to translate it to -// the output structure. output must be a pointer to a map or struct. -func Decode(input interface{}, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. -func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// DecodeMetadata is the same as Decode, but is shorthand to -// enable metadata collection. See DecoderConfig for more info. -func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecodeMetadata is the same as Decode, but is shorthand to -// enable both WeaklyTypedInput and metadata collection. See -// DecoderConfig for more info. -func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - - if config.Metadata.Unset == nil { - config.Metadata.Unset = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - if config.MatchName == nil { - config.MatchName = strings.EqualFold - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. -func (d *Decoder) Decode(input interface{}) error { - return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. 
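> Editor's note (not part of the diff): `WeakDecodeMetadata` above is the shorthand that enables both `WeaklyTypedInput` and metadata collection at once. A minimal sketch showing the weak string-to-bool/int conversions from the `DecoderConfig` comment, with `Metadata` reporting the key that matched no field; the `Flags` struct and `"color"` key are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Flags struct {
	Verbose bool
	Jobs    int
}

func main() {
	var flags Flags
	md := mapstructure.Metadata{}
	if err := mapstructure.WeakDecodeMetadata(map[string]interface{}{
		"verbose": "true", // weak: string -> bool
		"jobs":    "4",    // weak: string -> int
		"color":   "auto", // no matching field: reported in md.Unused
	}, &flags, &md); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v used=%v unused=%v\n", flags, md.Keys, md.Unused)
}
```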
-func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { - var inputVal reflect.Value - if input != nil { - inputVal = reflect.ValueOf(input) - - // We need to check here if input is a typed nil. Typed nils won't - // match the "input == nil" below so we check that here. - if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { - input = nil - } - } - - if input == nil { - // If the data is nil, then we don't set anything, unless ZeroFields is set - // to true. - if d.config.ZeroFields { - outVal.Set(reflect.Zero(outVal.Type())) - - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - } - return nil - } - - if !inputVal.IsValid() { - // If the input value is invalid, then we just set the value - // to be the zero value. - outVal.Set(reflect.Zero(outVal.Type())) - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - return nil - } - - if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the input. - var err error - input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) - if err != nil { - return fmt.Errorf("error decoding '%s': %s", name, err) - } - } - - var err error - outputKind := getKind(outVal) - addMetaKey := true - switch outputKind { - case reflect.Bool: - err = d.decodeBool(name, input, outVal) - case reflect.Interface: - err = d.decodeBasic(name, input, outVal) - case reflect.String: - err = d.decodeString(name, input, outVal) - case reflect.Int: - err = d.decodeInt(name, input, outVal) - case reflect.Uint: - err = d.decodeUint(name, input, outVal) - case reflect.Float32: - err = d.decodeFloat(name, input, outVal) - case reflect.Struct: - err = d.decodeStruct(name, input, outVal) - case reflect.Map: - err = d.decodeMap(name, input, outVal) - case reflect.Ptr: - addMetaKey, err = d.decodePtr(name, input, outVal) - case reflect.Slice: - err = d.decodeSlice(name, input, outVal) - case reflect.Array: - err = d.decodeArray(name, input, outVal) - case reflect.Func: - err = d.decodeFunc(name, input, outVal) - default: - // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, outputKind) - } - - // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metainput. - if addMetaKey && d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - - return err -} - -// This decodes a basic type (bool, int, string, etc.) and sets the -// value to "data" of that type. -func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { - if val.IsValid() && val.Elem().IsValid() { - elem := val.Elem() - - // If we can't address this element, then its not writable. Instead, - // we make a copy of the value (which is a pointer and therefore - // writable), decode into that, and replace the whole value. - copied := false - if !elem.CanAddr() { - copied = true - - // Make *T - copy := reflect.New(elem.Type()) - - // *T = elem - copy.Elem().Set(elem) - - // Set elem so we decode into it - elem = copy - } - - // Decode. If we have an error then return. We also return right - // away if we're not a copy because that means we decoded directly. 
- if err := d.decode(name, data, elem); err != nil || !copied { - return err - } - - // If we're a copy, we need to set te final result - val.Set(elem.Elem()) - return nil - } - - dataVal := reflect.ValueOf(data) - - // If the input data is a pointer, and the assigned type is the dereference - // of that exact pointer, then indirect it so that we can assign it. - // Example: *string to string - if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { - dataVal = reflect.Indirect(dataVal) - } - - if !dataVal.IsValid() { - dataVal = reflect.Zero(val.Type()) - } - - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput, - dataKind == reflect.Array && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch elemKind { - case reflect.Uint8: - var uints []uint8 - if dataKind == reflect.Array { - uints = make([]uint8, dataVal.Len(), dataVal.Len()) - for i := range uints { - uints[i] = dataVal.Index(i).Interface().(uint8) - } - } else { - uints = dataVal.Interface().([]uint8) - } - val.SetString(string(uints)) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else { - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseInt(str, 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Int64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetInt(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) 
- } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseUint(str, 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := strconv.ParseUint(string(jn), 0, 64) - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetUint(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(dataVal.Float()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - f, err := strconv.ParseFloat(str, val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Float64() - if err 
!= nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetFloat(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type and based on the input type jump to the proper func - dataVal := reflect.Indirect(reflect.ValueOf(data)) - switch dataVal.Kind() { - case reflect.Map: - return d.decodeMapFromMap(name, dataVal, val, valMap) - - case reflect.Struct: - return d.decodeMapFromStruct(name, dataVal, val, valMap) - - case reflect.Array, reflect.Slice: - if d.config.WeaklyTypedInput { - return d.decodeMapFromSlice(name, dataVal, val, valMap) - } - - fallthrough - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - // Special case for BC reasons (covered by tests) - if dataVal.Len() == 0 { - val.Set(valMap) - return nil - } - - for i := 0; i < dataVal.Len(); i++ { - err := d.decode( - name+"["+strconv.Itoa(i)+"]", - dataVal.Index(i).Interface(), val) - if err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // Accumulate errors - errors := make([]string, 0) - - // If the input data is empty, then we just match what the input data is. - if dataVal.Len() == 0 { - if dataVal.IsNil() { - if !val.IsNil() { - val.Set(dataVal) - } - } else { - // Set to empty allocated value - val.Set(valMap) - } - - return nil - } - - for _, k := range dataVal.MapKeys() { - fieldName := name + "[" + k.String() + "]" - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - typ := dataVal.Type() - for i := 0; i < typ.NumField(); i++ { - // Get the StructField first since this is a cheap operation. If the - // field is unexported, then ignore it. - f := typ.Field(i) - if f.PkgPath != "" { - continue - } - - // Next get the actual value of this field and verify it is assignable - // to the map value. 
- v := dataVal.Field(i) - if !v.Type().AssignableTo(valMap.Type().Elem()) { - return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) - } - - tagValue := f.Tag.Get(d.config.TagName) - keyName := f.Name - - if tagValue == "" && d.config.IgnoreUntaggedFields { - continue - } - - // If Squash is set in the config, we squash the field down. - squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous - - v = dereferencePtrToStructIfNeeded(v, d.config.TagName) - - // Determine the name of the key in the map - if index := strings.Index(tagValue, ","); index != -1 { - if tagValue[:index] == "-" { - continue - } - // If "omitempty" is specified in the tag, it ignores empty values. - if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { - continue - } - - // If "squash" is specified in the tag, we squash the field down. - squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 - if squash { - // When squashing, the embedded type can be a pointer to a struct. - if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { - v = v.Elem() - } - - // The final type must be a struct - if v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) - } - } - if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { - keyName = keyNameTagValue - } - } else if len(tagValue) > 0 { - if tagValue == "-" { - continue - } - keyName = tagValue - } - - switch v.Kind() { - // this is an embedded struct, so handle it differently - case reflect.Struct: - x := reflect.New(v.Type()) - x.Elem().Set(v) - - vType := valMap.Type() - vKeyType := vType.Key() - vElemType := vType.Elem() - mType := reflect.MapOf(vKeyType, vElemType) - vMap := reflect.MakeMap(mType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(vMap.Type()) - reflect.Indirect(addrVal).Set(vMap) - - err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) - if err != nil { - return err - } - - // the underlying map may have been completely overwritten so pull - // it indirectly out of the enclosing value. - vMap = reflect.Indirect(addrVal) - - if squash { - for _, k := range vMap.MapKeys() { - valMap.SetMapIndex(k, vMap.MapIndex(k)) - } - } else { - valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) - } - - default: - valMap.SetMapIndex(reflect.ValueOf(keyName), v) - } - } - - if val.CanAddr() { - val.Set(valMap) - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { - // If the input data is nil, then we want to just set the output - // pointer to be nil as well. - isNil := data == nil - if !isNil { - switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { - case reflect.Chan, - reflect.Func, - reflect.Interface, - reflect.Map, - reflect.Ptr, - reflect.Slice: - isNil = v.IsNil() - } - } - if isNil { - if !val.IsNil() && val.CanSet() { - nilValue := reflect.New(val.Type()).Elem() - val.Set(nilValue) - } - - return true, nil - } - - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- valType := val.Type() - valElemType := valType.Elem() - if val.CanSet() { - realVal := val - if realVal.IsNil() || d.config.ZeroFields { - realVal = reflect.New(valElemType) - } - - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return false, err - } - - val.Set(realVal) - } else { - if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return false, err - } - } - return false, nil -} - -func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - dataVal := reflect.Indirect(reflect.ValueOf(data)) - if val.Type() != dataVal.Type() { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - // If we have a non array/slice type then we first attempt to convert. - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Slice and array we use the normal logic - case dataValKind == reflect.Slice, dataValKind == reflect.Array: - break - - // Empty maps turn into empty slices - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } - // Create slice of maps of other sizes - return d.decodeSlice(name, []interface{}{data}, val) - - case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: - return d.decodeSlice(name, []byte(dataVal.String()), val) - - // All other types we try to convert to the slice type - // and "lift" it into it. i.e. a string becomes a string slice. - default: - // Just re-try this function with data as a slice. - return d.decodeSlice(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - } - - // If the input value is nil, then don't allocate since empty != nil - if dataValKind != reflect.Array && dataVal.IsNil() { - return nil - } - - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { - // Make a new slice to hold our result, same size as the original data. 
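The WeaklyTypedInput branches of decodeSlice "lift" a lone scalar into a one-element slice and turn a string into []byte when the element type is uint8. A small sketch of both weak paths:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	var out struct {
		Tags  []string
		Bytes []byte
	}

	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true,
		Result:           &out,
	})
	if err != nil {
		panic(err)
	}

	// A lone scalar is lifted into a one-element slice, and a string
	// decodes into []byte, matching the weak cases in decodeSlice above.
	if err := dec.Decode(map[string]interface{}{
		"tags":  "solo",
		"bytes": "raw",
	}); err != nil {
		panic(err)
	}
	fmt.Println(out.Tags, out.Bytes) // [solo] [114 97 119]
}
```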
- valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - for valSlice.Len() <= i { - valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) - } - currentField := valSlice.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - arrayType := reflect.ArrayOf(valType.Len(), valElemType) - - valArray := val - - if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty arrays - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.Zero(arrayType)) - return nil - } - - // All other types we try to convert to the array type - // and "lift" it into it. i.e. a string becomes a string array. - default: - // Just re-try this function with data as a slice. - return d.decodeArray(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } - if dataVal.Len() > arrayType.Len() { - return fmt.Errorf( - "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) - - } - - // Make a new array to hold our result, same size as the original data. - valArray = reflect.New(arrayType).Elem() - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - currentField := valArray.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the array we built up - val.Set(valArray) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. - if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - switch dataValKind { - case reflect.Map: - return d.decodeStructFromMap(name, dataVal, val) - - case reflect.Struct: - // Not the most efficient way to do this but we can optimize later if - // we want to. To convert from struct to struct we go to map first - // as an intermediary. 
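decodeArray mirrors decodeSlice but targets fixed-length arrays, so it rejects source data longer than the array. For example:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	var out struct {
		Pair [2]int
	}

	// A source slice longer than the target array is rejected with a
	// length error, as enforced in decodeArray above.
	err := mapstructure.Decode(map[string]interface{}{
		"pair": []int{1, 2, 3},
	}, &out)
	fmt.Println(err != nil) // true

	// A source of equal (or shorter) length decodes element-wise.
	if err := mapstructure.Decode(map[string]interface{}{
		"pair": []int{1, 2},
	}, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Pair) // [1 2]
}
```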
- - // Make a new map to hold our result - mapType := reflect.TypeOf((map[string]interface{})(nil)) - mval := reflect.MakeMap(mapType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(mval.Type()) - - reflect.Indirect(addrVal).Set(mval) - if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { - return err - } - - result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) - return result - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - targetValKeysUnused := make(map[interface{}]struct{}) - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - - // remainField is set to a valid field set with the "remain" tag if - // we are keeping track of remaining values. - var remainField *field - - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - fieldVal := structVal.Field(i) - if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { - // Handle embedded struct pointers as embedded structs. - fieldVal = fieldVal.Elem() - } - - // If "squash" is specified in the tag, we squash the field down. 
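As the comment in decodeStruct notes, struct-to-struct decoding works by first flattening the source struct into a map[string]interface{} and then decoding that map into the target, so same-named (or same-tagged) fields simply carry over:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Source struct {
	Host string
	Port int
}

type Target struct {
	Host string
	Port int
	TLS  bool
}

func main() {
	// Struct-to-struct decoding goes through an intermediate map, as
	// decodeStruct above describes; unmatched target fields keep their
	// zero values.
	var dst Target
	if err := mapstructure.Decode(Source{Host: "example", Port: 443}, &dst); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dst) // {Host:example Port:443 TLS:false}
}
```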
- squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous - remain := false - - // We always parse the tags cause we're looking for other tags too - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - - if tag == "remain" { - remain = true - break - } - } - - if squash { - if fieldVal.Kind() != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) - } else { - structs = append(structs, fieldVal) - } - continue - } - - // Build our field - if remain { - remainField = &field{fieldType, fieldVal} - } else { - // Normal struct field, store it away - fields = append(fields, field{fieldType, fieldVal}) - } - } - } - - // for fieldType, field := range fields { - for _, f := range fields { - field, fieldValue := f.field, f.val - fieldName := field.Name - - tagValue := field.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if d.config.MatchName(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Remember it for potential errors and metadata. - targetValKeysUnused[fieldName] = struct{}{} - continue - } - } - - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. - if name != "" { - fieldName = name + "." + fieldName - } - - if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) - } - } - - // If we have a "remain"-tagged field and we have unused keys then - // we put the unused keys directly into the remain field. - if remainField != nil && len(dataValKeysUnused) > 0 { - // Build a map of only the unused values - remain := map[interface{}]interface{}{} - for key := range dataValKeysUnused { - remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() - } - - // Decode it as-if we were just decoding this map onto our map. 
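A field tagged `,remain` collects every input key that no other field claimed, which is often preferable to failing via ErrorUnused. A sketch:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Name  string                 `mapstructure:"name"`
	Extra map[string]interface{} `mapstructure:",remain"`
}

func main() {
	var out Config
	// Keys without a matching struct field land in the ",remain" map
	// instead of counting as unused, per the logic above.
	if err := mapstructure.Decode(map[string]interface{}{
		"name":  "ko",
		"debug": true,
		"level": 3,
	}, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Extra) // ko map[debug:true level:3]
}
```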
- if err := d.decodeMap(name, remain, remainField.val); err != nil { - errors = appendErrors(errors, err) - } - - // Set the map to nil so we have none so that the next check will - // not error (ErrorUnused) - dataValKeysUnused = nil - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { - keys := make([]string, 0, len(targetValKeysUnused)) - for rawKey := range targetValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - for rawKey := range targetValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) - } - } - - return nil -} - -func isEmptyValue(v reflect.Value) bool { - switch getKind(v) { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} - -func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { - for i := 0; i < typ.NumField(); i++ { - f := typ.Field(i) - if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields - return true - } - if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside - return true - } - } - return false -} - -func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { - return v - } - deref := v.Elem() - derefT := deref.Type() - if isStructTypeConvertibleToMap(derefT, true, tagName) { - return deref - } - return v -} diff --git a/vendor/github.com/moby/docker-image-spec/LICENSE b/vendor/github.com/moby/docker-image-spec/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/github.com/moby/docker-image-spec/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/moby/docker-image-spec/specs-go/v1/image.go b/vendor/github.com/moby/docker-image-spec/specs-go/v1/image.go deleted file mode 100644 index 1672617635..0000000000 --- a/vendor/github.com/moby/docker-image-spec/specs-go/v1/image.go +++ /dev/null @@ -1,54 +0,0 @@ -package v1 - -import ( - "time" - - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -const DockerOCIImageMediaType = "application/vnd.docker.container.image.v1+json" - -// DockerOCIImage is a ocispec.Image extended with Docker specific Config. -type DockerOCIImage struct { - ocispec.Image - - // Shadow ocispec.Image.Config - Config DockerOCIImageConfig `json:"config,omitempty"` -} - -// DockerOCIImageConfig is a ocispec.ImageConfig extended with Docker specific fields. -type DockerOCIImageConfig struct { - ocispec.ImageConfig - - DockerOCIImageConfigExt -} - -// DockerOCIImageConfigExt contains Docker-specific fields in DockerImageConfig. -type DockerOCIImageConfigExt struct { - Healthcheck *HealthcheckConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - - OnBuild []string `json:",omitempty"` // ONBUILD metadata that were defined on the image Dockerfile - Shell []string `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - -// HealthcheckConfig holds configuration settings for the HEALTHCHECK feature. -type HealthcheckConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - StartInterval time.Duration `json:",omitempty"` // The interval to attempt healthchecks at during the start period - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. 
- Retries int `json:",omitempty"` -} diff --git a/vendor/github.com/oklog/ulid/.gitignore b/vendor/github.com/oklog/ulid/.gitignore deleted file mode 100644 index c92c4d5608..0000000000 --- a/vendor/github.com/oklog/ulid/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -#### joe made this: http://goel.io/joe - -#####=== Go ===##### - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - diff --git a/vendor/github.com/oklog/ulid/.travis.yml b/vendor/github.com/oklog/ulid/.travis.yml deleted file mode 100644 index 43eb762fa3..0000000000 --- a/vendor/github.com/oklog/ulid/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -sudo: false -go: - - 1.10.x -install: - - go get -v github.com/golang/lint/golint - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - - go get -d -t -v ./... - - go build -v ./... -script: - - go vet ./... - - $HOME/gopath/bin/golint . - - go test -v -race ./... - - go test -v -covermode=count -coverprofile=cov.out - - $HOME/gopath/bin/goveralls -coverprofile=cov.out -service=travis-ci -repotoken "$COVERALLS_TOKEN" || true diff --git a/vendor/github.com/oklog/ulid/AUTHORS.md b/vendor/github.com/oklog/ulid/AUTHORS.md deleted file mode 100644 index 95581c78b0..0000000000 --- a/vendor/github.com/oklog/ulid/AUTHORS.md +++ /dev/null @@ -1,2 +0,0 @@ -- Peter Bourgon (@peterbourgon) -- Tomás Senart (@tsenart) diff --git a/vendor/github.com/oklog/ulid/CHANGELOG.md b/vendor/github.com/oklog/ulid/CHANGELOG.md deleted file mode 100644 index 8da38c6b00..0000000000 --- a/vendor/github.com/oklog/ulid/CHANGELOG.md +++ /dev/null @@ -1,33 +0,0 @@ -## 1.3.1 / 2018-10-02 - -* Use underlying entropy source for random increments in Monotonic (#32) - -## 1.3.0 / 2018-09-29 - -* Monotonic entropy support (#31) - -## 1.2.0 / 2018-09-09 - -* Add a function to convert Unix time in milliseconds back to time.Time (#30) - -## 1.1.0 / 2018-08-15 - -* Ensure random part is always read from the entropy reader in full (#28) - -## 1.0.0 / 2018-07-29 - -* Add ParseStrict and MustParseStrict functions (#26) -* Enforce overflow checking when parsing (#20) - -## 0.3.0 / 2017-01-03 - -* Implement ULID.Compare method - -## 0.2.0 / 2016-12-13 - -* Remove year 2262 Timestamp bug. (#1) -* Gracefully handle invalid encodings when parsing. - -## 0.1.0 / 2016-12-06 - -* First ULID release diff --git a/vendor/github.com/oklog/ulid/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/CONTRIBUTING.md deleted file mode 100644 index 68f03f26eb..0000000000 --- a/vendor/github.com/oklog/ulid/CONTRIBUTING.md +++ /dev/null @@ -1,17 +0,0 @@ -# Contributing - -We use GitHub to manage reviews of pull requests. - -* If you have a trivial fix or improvement, go ahead and create a pull - request, addressing (with `@...`) one or more of the maintainers - (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. - -* If you plan to do something more involved, first propose your ideas - in a Github issue. This will avoid unnecessary work and surely give - you and us a good deal of inspiration. 
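The HealthcheckConfig removed above encodes Docker's HEALTHCHECK semantics: Test selects the probe form ({"CMD", args...}, {"CMD-SHELL", command}, {"NONE"}, or empty to inherit), and zero durations or a zero Retries mean "inherit". A hedged construction sketch, assuming the module's canonical github.com/moby/docker-image-spec import path:

```go
package main

import (
	"fmt"
	"time"

	v1 "github.com/moby/docker-image-spec/specs-go/v1"
)

func main() {
	// "CMD-SHELL" runs the command under the image's default shell;
	// the durations are expressed as time.Duration values.
	hc := v1.HealthcheckConfig{
		Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval: 30 * time.Second,
		Timeout:  5 * time.Second,
		Retries:  3,
	}
	fmt.Printf("%+v\n", hc)
}
```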
- -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/oklog/ulid/Gopkg.lock b/vendor/github.com/oklog/ulid/Gopkg.lock deleted file mode 100644 index 349b449a6e..0000000000 --- a/vendor/github.com/oklog/ulid/Gopkg.lock +++ /dev/null @@ -1,15 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - name = "github.com/pborman/getopt" - packages = ["v2"] - revision = "7148bc3a4c3008adfcab60cbebfd0576018f330b" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "6779b05abd5cd429c5393641d2453005a3cb74a400d161b2b5c5d0ca2e10e116" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/oklog/ulid/Gopkg.toml b/vendor/github.com/oklog/ulid/Gopkg.toml deleted file mode 100644 index 624a7a019c..0000000000 --- a/vendor/github.com/oklog/ulid/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "github.com/pborman/getopt" diff --git a/vendor/github.com/oklog/ulid/LICENSE b/vendor/github.com/oklog/ulid/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/github.com/oklog/ulid/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
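The oklog/ulid removal continues below with the package's README and implementation. As quick orientation, here is a sketch of the monotonic-entropy property the package documents (assuming the upstream github.com/oklog/ulid import path): within one millisecond, monotonic entropy makes each new ULID strictly greater, so string order matches creation order.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	// Monotonic entropy increments the previous ULID's entropy bytes
	// within the same millisecond, so string order matches creation order.
	entropy := ulid.Monotonic(rand.New(rand.NewSource(1)), 0)
	ms := ulid.Timestamp(time.Now())

	a := ulid.MustNew(ms, entropy)
	b := ulid.MustNew(ms, entropy)
	fmt.Println(a.String() < b.String()) // true
}
```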
diff --git a/vendor/github.com/oklog/ulid/README.md b/vendor/github.com/oklog/ulid/README.md deleted file mode 100644 index 0a3d2f82b2..0000000000 --- a/vendor/github.com/oklog/ulid/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# Universally Unique Lexicographically Sortable Identifier - -![Project status](https://img.shields.io/badge/version-1.3.0-yellow.svg) -[![Build Status](https://secure.travis-ci.org/oklog/ulid.png)](http://travis-ci.org/oklog/ulid) -[![Go Report Card](https://goreportcard.com/badge/oklog/ulid?cache=0)](https://goreportcard.com/report/oklog/ulid) -[![Coverage Status](https://coveralls.io/repos/github/oklog/ulid/badge.svg?branch=master&cache=0)](https://coveralls.io/github/oklog/ulid?branch=master) -[![GoDoc](https://godoc.org/github.com/oklog/ulid?status.svg)](https://godoc.org/github.com/oklog/ulid) -[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE) - -A Go port of [alizain/ulid](https://github.com/alizain/ulid) with binary format implemented. - -## Background - -A GUID/UUID can be suboptimal for many use-cases because: - -- It isn't the most character efficient way of encoding 128 bits -- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address -- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures -- UUID v4 provides no other information than randomness which can cause fragmentation in many data structures - -A ULID however: - -- Is compatible with UUID/GUID's -- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact) -- Lexicographically sortable -- Canonically encoded as a 26 character string, as opposed to the 36 character UUID -- Uses Crockford's base32 for better efficiency and readability (5 bits per character) -- Case insensitive -- No special characters (URL safe) -- Monotonic sort order (correctly detects and handles the same millisecond) - -## Install - -```shell -go get github.com/oklog/ulid -``` - -## Usage - -An ULID is constructed with a `time.Time` and an `io.Reader` entropy source. -This design allows for greater flexibility in choosing your trade-offs. - -Please note that `rand.Rand` from the `math` package is *not* safe for concurrent use. -Instantiate one per long living go-routine or use a `sync.Pool` if you want to avoid the potential contention of a locked `rand.Source` as its been frequently observed in the package level functions. - - -```go -func ExampleULID() { - t := time.Unix(1000000, 0) - entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0) - fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy)) - // Output: 0000XSNJG0MQJHBF4QX1EFD6Y3 -} - -``` - -## Specification - -Below is the current specification of ULID as implemented in this repository. - -### Components - -**Timestamp** -- 48 bits -- UNIX-time in milliseconds -- Won't run out of space till the year 10895 AD - -**Entropy** -- 80 bits -- User defined entropy source. -- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic) - -### Encoding - -[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown. -This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse. 
- -``` -0123456789ABCDEFGHJKMNPQRSTVWXYZ -``` - -### Binary Layout and Byte Order - -The components are encoded as 16 octets. Each component is encoded with the Most Significant Byte first (network byte order). - -``` -0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 32_bit_uint_time_high | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 16_bit_uint_time_low | 16_bit_uint_random | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 32_bit_uint_random | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 32_bit_uint_random | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -``` - -### String Representation - -``` - 01AN4Z07BY 79KA1307SR9X4MV3 -|----------| |----------------| - Timestamp Entropy - 10 chars 16 chars - 48bits 80bits - base32 base32 -``` - -## Test - -```shell -go test ./... -``` - -## Benchmarks - -On a Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1 - -``` -BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op -BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op -BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op -BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op -BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op -BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op -BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op -BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op -BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op -BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op -BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op -BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op -BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op -BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op -BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op -BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op -BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op -BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op -BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op -BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op -BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op -BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op -``` - -## Prior Art - -- [alizain/ulid](https://github.com/alizain/ulid) -- [RobThree/NUlid](https://github.com/RobThree/NUlid) -- [imdario/go-ulid](https://github.com/imdario/go-ulid) diff --git a/vendor/github.com/oklog/ulid/ulid.go b/vendor/github.com/oklog/ulid/ulid.go deleted file mode 100644 index c5d0d66fd2..0000000000 --- a/vendor/github.com/oklog/ulid/ulid.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2016 The Oklog Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ulid - -import ( - "bufio" - "bytes" - "database/sql/driver" - "encoding/binary" - "errors" - "io" - "math" - "math/bits" - "math/rand" - "time" -) - -/* -An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier - - The components are encoded as 16 octets. - Each component is encoded with the MSB first (network byte order). - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 32_bit_uint_time_high | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 16_bit_uint_time_low | 16_bit_uint_random | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 32_bit_uint_random | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 32_bit_uint_random | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -*/ -type ULID [16]byte - -var ( - // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong - // data size. - ErrDataSize = errors.New("ulid: bad data size when unmarshaling") - - // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with - // invalid Base32 encodings. - ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling") - - // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient - // size. - ErrBufferSize = errors.New("ulid: bad buffer size when marshaling") - - // ErrBigTime is returned when constructing an ULID with a time that is larger - // than MaxTime. - ErrBigTime = errors.New("ulid: time too big") - - // ErrOverflow is returned when unmarshaling a ULID whose first character is - // larger than 7, thereby exceeding the valid bit depth of 128. - ErrOverflow = errors.New("ulid: overflow when unmarshaling") - - // ErrMonotonicOverflow is returned by a Monotonic entropy source when - // incrementing the previous ULID's entropy bytes would result in overflow. - ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow") - - // ErrScanValue is returned when the value passed to scan cannot be unmarshaled - // into the ULID. - ErrScanValue = errors.New("ulid: source value must be a string or byte slice") -) - -// New returns an ULID with the given Unix milliseconds timestamp and an -// optional entropy source. Use the Timestamp function to convert -// a time.Time to Unix milliseconds. -// -// ErrBigTime is returned when passing a timestamp bigger than MaxTime. -// Reading from the entropy source may also return an error. -func New(ms uint64, entropy io.Reader) (id ULID, err error) { - if err = id.SetTime(ms); err != nil { - return id, err - } - - switch e := entropy.(type) { - case nil: - return id, err - case *monotonic: - err = e.MonotonicRead(ms, id[6:]) - default: - _, err = io.ReadFull(e, id[6:]) - } - - return id, err -} - -// MustNew is a convenience function equivalent to New that panics on failure -// instead of returning an error. 
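New, shown above, writes the 6-byte timestamp and fills the remaining 10 bytes from the entropy reader; a nil reader leaves the entropy zeroed, and timestamps beyond MaxTime fail with ErrBigTime. A sketch, under the same assumed import path:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/oklog/ulid"
)

func main() {
	// The first 6 bytes come from the timestamp, the remaining 10 from
	// the entropy reader (crypto/rand here).
	id, err := ulid.New(ulid.Now(), rand.Reader)
	if err != nil {
		panic(err)
	}
	fmt.Println(id)

	// Timestamps above MaxTime are rejected with ErrBigTime.
	_, err = ulid.New(ulid.MaxTime()+1, rand.Reader)
	fmt.Println(err == ulid.ErrBigTime) // true
}
```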
-func MustNew(ms uint64, entropy io.Reader) ULID { - id, err := New(ms, entropy) - if err != nil { - panic(err) - } - return id -} - -// Parse parses an encoded ULID, returning an error in case of failure. -// -// ErrDataSize is returned if the len(ulid) is different from an encoded -// ULID's length. Invalid encodings produce undefined ULIDs. For a version that -// returns an error instead, see ParseStrict. -func Parse(ulid string) (id ULID, err error) { - return id, parse([]byte(ulid), false, &id) -} - -// ParseStrict parses an encoded ULID, returning an error in case of failure. -// -// It is like Parse, but additionally validates that the parsed ULID consists -// only of valid base32 characters. It is slightly slower than Parse. -// -// ErrDataSize is returned if the len(ulid) is different from an encoded -// ULID's length. Invalid encodings return ErrInvalidCharacters. -func ParseStrict(ulid string) (id ULID, err error) { - return id, parse([]byte(ulid), true, &id) -} - -func parse(v []byte, strict bool, id *ULID) error { - // Check if a base32 encoded ULID is the right length. - if len(v) != EncodedSize { - return ErrDataSize - } - - // Check if all the characters in a base32 encoded ULID are part of the - // expected base32 character set. - if strict && - (dec[v[0]] == 0xFF || - dec[v[1]] == 0xFF || - dec[v[2]] == 0xFF || - dec[v[3]] == 0xFF || - dec[v[4]] == 0xFF || - dec[v[5]] == 0xFF || - dec[v[6]] == 0xFF || - dec[v[7]] == 0xFF || - dec[v[8]] == 0xFF || - dec[v[9]] == 0xFF || - dec[v[10]] == 0xFF || - dec[v[11]] == 0xFF || - dec[v[12]] == 0xFF || - dec[v[13]] == 0xFF || - dec[v[14]] == 0xFF || - dec[v[15]] == 0xFF || - dec[v[16]] == 0xFF || - dec[v[17]] == 0xFF || - dec[v[18]] == 0xFF || - dec[v[19]] == 0xFF || - dec[v[20]] == 0xFF || - dec[v[21]] == 0xFF || - dec[v[22]] == 0xFF || - dec[v[23]] == 0xFF || - dec[v[24]] == 0xFF || - dec[v[25]] == 0xFF) { - return ErrInvalidCharacters - } - - // Check if the first character in a base32 encoded ULID will overflow. This - // happens because the base32 representation encodes 130 bits, while the - // ULID is only 128 bits. - // - // See https://github.com/oklog/ulid/issues/9 for details. - if v[0] > '7' { - return ErrOverflow - } - - // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid) - // to decode a base32 ULID. - - // 6 bytes timestamp (48 bits) - (*id)[0] = ((dec[v[0]] << 5) | dec[v[1]]) - (*id)[1] = ((dec[v[2]] << 3) | (dec[v[3]] >> 2)) - (*id)[2] = ((dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4)) - (*id)[3] = ((dec[v[5]] << 4) | (dec[v[6]] >> 1)) - (*id)[4] = ((dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3)) - (*id)[5] = ((dec[v[8]] << 5) | dec[v[9]]) - - // 10 bytes of entropy (80 bits) - (*id)[6] = ((dec[v[10]] << 3) | (dec[v[11]] >> 2)) - (*id)[7] = ((dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4)) - (*id)[8] = ((dec[v[13]] << 4) | (dec[v[14]] >> 1)) - (*id)[9] = ((dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3)) - (*id)[10] = ((dec[v[16]] << 5) | dec[v[17]]) - (*id)[11] = ((dec[v[18]] << 3) | dec[v[19]]>>2) - (*id)[12] = ((dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4)) - (*id)[13] = ((dec[v[21]] << 4) | (dec[v[22]] >> 1)) - (*id)[14] = ((dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3)) - (*id)[15] = ((dec[v[24]] << 5) | dec[v[25]]) - - return nil -} - -// MustParse is a convenience function equivalent to Parse that panics on failure -// instead of returning an error. 
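parse, above, validates in three stages: length (ErrDataSize), optionally the base32 alphabet (ErrInvalidCharacters, strict mode only), and the 128-bit overflow guard on the first character (ErrOverflow). For example:

```go
package main

import (
	"fmt"

	"github.com/oklog/ulid"
)

func main() {
	// Parse only checks the length; ParseStrict also validates that every
	// character is in the Crockford base32 alphabet ('U' is excluded).
	_, err := ulid.ParseStrict("0000XSNJG0MQJHBF4QX1EFD6YU")
	fmt.Println(err == ulid.ErrInvalidCharacters) // true

	// A first character above '7' would exceed 128 bits, so even the
	// non-strict Parse rejects it.
	_, err = ulid.Parse("80000000000000000000000000")
	fmt.Println(err == ulid.ErrOverflow) // true
}
```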
-func MustParse(ulid string) ULID { - id, err := Parse(ulid) - if err != nil { - panic(err) - } - return id -} - -// MustParseStrict is a convenience function equivalent to ParseStrict that -// panics on failure instead of returning an error. -func MustParseStrict(ulid string) ULID { - id, err := ParseStrict(ulid) - if err != nil { - panic(err) - } - return id -} - -// String returns a lexicographically sortable string encoded ULID -// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3 -// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy -func (id ULID) String() string { - ulid := make([]byte, EncodedSize) - _ = id.MarshalTextTo(ulid) - return string(ulid) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface by -// returning the ULID as a byte slice. -func (id ULID) MarshalBinary() ([]byte, error) { - ulid := make([]byte, len(id)) - return ulid, id.MarshalBinaryTo(ulid) -} - -// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer. -// ErrBufferSize is returned when the len(dst) != 16. -func (id ULID) MarshalBinaryTo(dst []byte) error { - if len(dst) != len(id) { - return ErrBufferSize - } - - copy(dst, id[:]) - return nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by -// copying the passed data and converting it to an ULID. ErrDataSize is -// returned if the data length is different from ULID length. -func (id *ULID) UnmarshalBinary(data []byte) error { - if len(data) != len(*id) { - return ErrDataSize - } - - copy((*id)[:], data) - return nil -} - -// Encoding is the base 32 encoding alphabet used in ULID strings. -const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" - -// MarshalText implements the encoding.TextMarshaler interface by -// returning the string encoded ULID. -func (id ULID) MarshalText() ([]byte, error) { - ulid := make([]byte, EncodedSize) - return ulid, id.MarshalTextTo(ulid) -} - -// MarshalTextTo writes the ULID as a string to the given buffer. -// ErrBufferSize is returned when the len(dst) != 26. -func (id ULID) MarshalTextTo(dst []byte) error { - // Optimized unrolled loop ahead. - // From https://github.com/RobThree/NUlid - - if len(dst) != EncodedSize { - return ErrBufferSize - } - - // 10 byte timestamp - dst[0] = Encoding[(id[0]&224)>>5] - dst[1] = Encoding[id[0]&31] - dst[2] = Encoding[(id[1]&248)>>3] - dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)] - dst[4] = Encoding[(id[2]&62)>>1] - dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)] - dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)] - dst[7] = Encoding[(id[4]&124)>>2] - dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)] - dst[9] = Encoding[id[5]&31] - - // 16 bytes of entropy - dst[10] = Encoding[(id[6]&248)>>3] - dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)] - dst[12] = Encoding[(id[7]&62)>>1] - dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)] - dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)] - dst[15] = Encoding[(id[9]&124)>>2] - dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)] - dst[17] = Encoding[id[10]&31] - dst[18] = Encoding[(id[11]&248)>>3] - dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)] - dst[20] = Encoding[(id[12]&62)>>1] - dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)] - dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)] - dst[23] = Encoding[(id[14]&124)>>2] - dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)] - dst[25] = Encoding[id[15]&31] - - return nil -} - -// Byte to index table for O(1) lookups when unmarshaling. 
-// We use 0xFF as a sentinel value for invalid indexes. -var dec = [...]byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, - 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, - 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, - 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, - 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, - 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, -} - -// EncodedSize is the length of a text encoded ULID. -const EncodedSize = 26 - -// UnmarshalText implements the encoding.TextUnmarshaler interface by -// parsing the data as string encoded ULID. -// -// ErrDataSize is returned if len(v) is different from an encoded -// ULID's length. Invalid encodings produce undefined ULIDs. -func (id *ULID) UnmarshalText(v []byte) error { - return parse(v, false, id) -} - -// Time returns the Unix time in milliseconds encoded in the ULID. -// Use the top level Time function to convert the returned value to -// a time.Time. -func (id ULID) Time() uint64 { - return uint64(id[5]) | uint64(id[4])<<8 | - uint64(id[3])<<16 | uint64(id[2])<<24 | - uint64(id[1])<<32 | uint64(id[0])<<40 -} - -// maxTime is the maximum Unix time in milliseconds that can be -// represented in an ULID. -var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time() - -// MaxTime returns the maximum Unix time in milliseconds that -// can be encoded in an ULID. -func MaxTime() uint64 { return maxTime } - -// Now is a convenience function that returns the current -// UTC time in Unix milliseconds. Equivalent to: -// Timestamp(time.Now().UTC()) -func Now() uint64 { return Timestamp(time.Now().UTC()) } - -// Timestamp converts a time.Time to Unix milliseconds. -// -// Because of the way ULID stores time, times from the year -// 10889 produce undefined results. -func Timestamp(t time.Time) uint64 { - return uint64(t.Unix())*1000 + - uint64(t.Nanosecond()/int(time.Millisecond)) -} - -// Time converts Unix milliseconds in the format -// returned by the Timestamp function to a time.Time. -func Time(ms uint64) time.Time { - s := int64(ms / 1e3) - ns := int64((ms % 1e3) * 1e6) - return time.Unix(s, ns) -} - -// SetTime sets the time component of the ULID to the given Unix time -// in milliseconds.
-func (id *ULID) SetTime(ms uint64) error { - if ms > maxTime { - return ErrBigTime - } - - (*id)[0] = byte(ms >> 40) - (*id)[1] = byte(ms >> 32) - (*id)[2] = byte(ms >> 24) - (*id)[3] = byte(ms >> 16) - (*id)[4] = byte(ms >> 8) - (*id)[5] = byte(ms) - - return nil -} - -// Entropy returns the entropy from the ULID. -func (id ULID) Entropy() []byte { - e := make([]byte, 10) - copy(e, id[6:]) - return e -} - -// SetEntropy sets the ULID entropy to the passed byte slice. -// ErrDataSize is returned if len(e) != 10. -func (id *ULID) SetEntropy(e []byte) error { - if len(e) != 10 { - return ErrDataSize - } - - copy((*id)[6:], e) - return nil -} - -// Compare returns an integer comparing id and other lexicographically. -// The result will be 0 if id==other, -1 if id < other, and +1 if id > other. -func (id ULID) Compare(other ULID) int { - return bytes.Compare(id[:], other[:]) -} - -// Scan implements the sql.Scanner interface. It supports scanning -// a string or byte slice. -func (id *ULID) Scan(src interface{}) error { - switch x := src.(type) { - case nil: - return nil - case string: - return id.UnmarshalText([]byte(x)) - case []byte: - return id.UnmarshalBinary(x) - } - - return ErrScanValue -} - -// Value implements the sql/driver.Valuer interface. This returns the value -// represented as a byte slice. If instead a string is desirable, a wrapper -// type can be created that calls String(). -// -// // stringValuer wraps a ULID as a string-based driver.Valuer. -// type stringValuer ULID -// -// func (id stringValuer) Value() (driver.Value, error) { -// return ULID(id).String(), nil -// } -// -// // Example usage. -// db.Exec("...", stringValuer(id)) -func (id ULID) Value() (driver.Value, error) { - return id.MarshalBinary() -} - -// Monotonic returns an entropy source that is guaranteed to yield -// strictly increasing entropy bytes for the same ULID timestamp. -// On conflicts, the previous ULID entropy is incremented with a -// random number between 1 and `inc` (inclusive). -// -// The provided entropy source must actually yield random bytes or else -// monotonic reads are not guaranteed to terminate, since there isn't -// enough randomness to compute an increment number. -// -// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`. -// The lower the value of `inc`, the easier the next ULID within the -// same millisecond is to guess. If your code depends on ULIDs having -// secure entropy bytes, then don't go under this default unless you know -// what you're doing. -// -// The returned io.Reader isn't safe for concurrent use. -func Monotonic(entropy io.Reader, inc uint64) io.Reader { - m := monotonic{ - Reader: bufio.NewReader(entropy), - inc: inc, - } - - if m.inc == 0 { - m.inc = math.MaxUint32 - } - - if rng, ok := entropy.(*rand.Rand); ok { - m.rng = rng - } - - return &m -} - -type monotonic struct { - io.Reader - ms uint64 - inc uint64 - entropy uint80 - rand [8]byte - rng *rand.Rand -} - -func (m *monotonic) MonotonicRead(ms uint64, entropy []byte) (err error) { - if !m.entropy.IsZero() && m.ms == ms { - err = m.increment() - m.entropy.AppendTo(entropy) - } else if _, err = io.ReadFull(m.Reader, entropy); err == nil { - m.ms = ms - m.entropy.SetBytes(entropy) - } - return err -} - -// increment the previous entropy number with a random number -// of up to m.inc (inclusive). 
-func (m *monotonic) increment() error { - if inc, err := m.random(); err != nil { - return err - } else if m.entropy.Add(inc) { - return ErrMonotonicOverflow - } - return nil -} - -// random returns a uniform random value in [1, m.inc), reading entropy -// from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1. -// Adapted from: https://golang.org/pkg/crypto/rand/#Int -func (m *monotonic) random() (inc uint64, err error) { - if m.inc <= 1 { - return 1, nil - } - - // Fast path for using an underlying rand.Rand directly. - if m.rng != nil { - // Range: [1, m.inc) - return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil - } - - // bitLen is the maximum bit length needed to encode a value < m.inc. - bitLen := bits.Len64(m.inc) - - // byteLen is the maximum byte length needed to encode a value < m.inc. - byteLen := uint(bitLen+7) / 8 - - // msbitLen is the number of bits in the most significant byte of m.inc-1. - msbitLen := uint(bitLen % 8) - if msbitLen == 0 { - msbitLen = 8 - } - - for inc == 0 || inc >= m.inc { - if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil { - return 0, err - } - - // Clear bits in the first byte to increase the probability - // that the candidate is < m.inc. - m.rand[0] &= uint8(int(1<<msbitLen) - 1) -Derek McGowan -Stephen J Day -Haibing Zhou diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml deleted file mode 100644 index b6165f83ca..0000000000 --- a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: 2 - -requirements: - signed_off_by: - required: true - -always_pending: - title_regex: '^WIP' - explanation: 'Work in progress...' - -group_defaults: - required: 2 - approve_by_comment: - enabled: true - approve_regex: '^LGTM' - reject_regex: '^Rejected' - reset_on_push: - enabled: true - author_approval: - ignored: true - conditions: - branches: - - master - -groups: - go-digest: - teams: - - go-digest-maintainers diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml deleted file mode 100644 index 5775f885c1..0000000000 --- a/vendor/github.com/opencontainers/go-digest/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.12.x - - 1.13.x - - master diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md deleted file mode 100644 index e4d962ac16..0000000000 --- a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md +++ /dev/null @@ -1,72 +0,0 @@ -# Contributing to Docker open source projects - -Want to hack on this project? Awesome! Here are instructions to get you started. - -This project is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
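For orientation, stepping outside the diff for a moment: a minimal usage sketch of the `ulid` package removed above, exercising the API shown in the deleted file (`Monotonic`, `MustNew`, `Timestamp`, `Parse`, `Time`). The import path `github.com/oklog/ulid` is assumed from the package name and is not confirmed by the diff itself.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/oklog/ulid" // assumed upstream path of the vendored package above
)

func main() {
	t := time.Now().UTC()

	// Monotonic wraps an entropy source so ULIDs minted within the same
	// millisecond still sort in generation order; inc == 0 selects the
	// secure default increment (math.MaxUint32), per the doc comment above.
	entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)

	// MustNew panics on error; fine for a sketch, prefer New in real code.
	id := ulid.MustNew(ulid.Timestamp(t), entropy)
	fmt.Println(id) // 26-character base32 form, e.g. 01AN4Z07BY79KA1307SR9X4MV3

	// Round-trip through the text encoding and recover the timestamp
	// at millisecond precision.
	parsed, err := ulid.Parse(id.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(ulid.Time(parsed.Time()))
}
```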
- -For an in-depth description of our contribution process, visit the -contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -1 Letterman Drive -Suite D4700 -San Francisco, CA, 94129 - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE b/vendor/github.com/opencontainers/go-digest/LICENSE deleted file mode 100644 index 3ac8ab6487..0000000000 --- a/vendor/github.com/opencontainers/go-digest/LICENSE +++ /dev/null @@ -1,192 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2019, 2020 OCI Contributors - Copyright 2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs deleted file mode 100644 index e26cd4fc8e..0000000000 --- a/vendor/github.com/opencontainers/go-digest/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. 
Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. 
For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. 
For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. 
a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. 
TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. 
-Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS deleted file mode 100644 index 843b1b2061..0000000000 --- a/vendor/github.com/opencontainers/go-digest/MAINTAINERS +++ /dev/null @@ -1,5 +0,0 @@ -Derek McGowan (@dmcgowan) -Stephen Day (@stevvooe) -Vincent Batts (@vbatts) -Akihiro Suda (@AkihiroSuda) -Sebastiaan van Stijn (@thaJeztah) diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md deleted file mode 100644 index a11287207e..0000000000 --- a/vendor/github.com/opencontainers/go-digest/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# go-digest - -[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) - -Common digest package used across the container ecosystem. - -Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information. - -# What is a digest? - -A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function). - -The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems: - -```go -id := digest.FromBytes([]byte("my content")) -``` - -In the example above, the id can be used to uniquely identify the byte slice "my content". -This allows two disparate applications to agree on a verifiable identifier without having to trust one another. - -An identifying digest can be verified, as follows: - -```go -if id != digest.FromBytes([]byte("my content")) { - return errors.New("the content has changed!") -} -``` - -A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense: - -```go -rd := getContent() -verifier := id.Verifier() -io.Copy(verifier, rd) - -if !verifier.Verified() { - return errors.New("the content has changed!") -} -``` - -Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system. 
- -# Usage - -While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package. - -1. Make sure to import the hash implementations into your application or the package will panic. - You should have something like the following in the main (or other entrypoint) of your application: - - ```go - import ( - _ "crypto/sha256" - _ "crypto/sha512" - ) - ``` - This may seem inconvenient but it allows you to replace the hash - implementations with others, such as https://github.com/stevvooe/resumable. - -2. Even though `digest.Digest` can be assembled as a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input. - While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application. - -3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests. - -# Stability - -The Go API, at this stage, is considered stable, unless otherwise noted. - -As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest). - -# Contributing - -This package is considered fairly complete. -It has been in production in thousands (millions?) of deployments and is fairly battle-hardened. -New additions will be met with skepticism. -If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR. - -## Code of Conduct - -Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct]. - -## Security - -If you find an issue, please follow the [security][security] protocol to report it. - -# Copyright and license - -Copyright © 2019, 2020 OCI Contributors -Copyright © 2016 Docker, Inc. -All rights reserved, except as follows. -Code is released under the [Apache 2.0 license](LICENSE). -This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). -You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. - -[security]: https://github.com/opencontainers/org/blob/master/security -[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go deleted file mode 100644 index 490951dc3f..0000000000 --- a/vendor/github.com/opencontainers/go-digest/algorithm.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2019, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. - -package digest - -import ( - "crypto" - "fmt" - "hash" - "io" - "regexp" -) - -// Algorithm identifies an implementation of a digester by an identifier. -// Note that this defines both the hash algorithm used and the string -// encoding. -type Algorithm string - -// supported digest types -const ( - SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only) - SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only) - SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only) - - // Canonical is the primary digest algorithm used with the distribution - // project. Other digests may be used but this one is the primary storage - // digest. - Canonical = SHA256 -) - -var ( - // TODO(stevvooe): Follow the pattern of the standard crypto package for - // registration of digests. Effectively, we are a registerable set and - // common symbol access. - - // algorithms maps values to hash.Hash implementations. Other algorithms - // may be available but they cannot be calculated by the digest package. - algorithms = map[Algorithm]crypto.Hash{ - SHA256: crypto.SHA256, - SHA384: crypto.SHA384, - SHA512: crypto.SHA512, - } - - // anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests. - // Note that A-F is disallowed. - anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{ - SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`), - SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`), - SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`), - } -) - -// Available returns true if the digest type is available for use. If this -// returns false, Digester and Hash will return nil. -func (a Algorithm) Available() bool { - h, ok := algorithms[a] - if !ok { - return false - } - - // check availability of the hash, as well - return h.Available() -} - -func (a Algorithm) String() string { - return string(a) -} - -// Size returns the number of bytes returned by the hash. -func (a Algorithm) Size() int { - h, ok := algorithms[a] - if !ok { - return 0 - } - return h.Size() -} - -// Set is implemented to allow use of Algorithm as a command line flag. -func (a *Algorithm) Set(value string) error { - if value == "" { - *a = Canonical - } else { - // just do a type conversion, support is queried with Available. - *a = Algorithm(value) - } - - if !a.Available() { - return ErrDigestUnsupported - } - - return nil -} - -// Digester returns a new digester for the specified algorithm. If the algorithm - // does not have a digester implementation, nil will be returned. This can be - // checked by calling Available before calling Digester. -func (a Algorithm) Digester() Digester { - return &digester{ - alg: a, - hash: a.Hash(), - } -} - -// Hash returns a new hash as used by the algorithm. If not available, the -// method will panic. Check Algorithm.Available() before calling. -func (a Algorithm) Hash() hash.Hash { - if !a.Available() { - // Empty algorithm string is invalid - if a == "" { - panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()")) - } - - // NOTE(stevvooe): A missing hash is usually a programming error that - // must be resolved at compile time. We don't import in the digest - // package to allow users to choose their hash implementation (such as - // when using stevvooe/resumable or a hardware accelerated package).
- // -// Applications that may want to resolve the hash at runtime should -// call Algorithm.Available before calling Algorithm.Hash(). - panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) - } - - return algorithms[a].New() -} - -// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into -// the encoded portion of the digest. -func (a Algorithm) Encode(d []byte) string { - // TODO(stevvooe): Currently, all algorithms use a hex encoding. When we - // add support for back registration, we can modify this accordingly. - return fmt.Sprintf("%x", d) -} - -// FromReader returns the digest of the reader using the algorithm. -func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { - digester := a.Digester() - - if _, err := io.Copy(digester.Hash(), rd); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// FromBytes digests the input and returns a Digest. -func (a Algorithm) FromBytes(p []byte) Digest { - digester := a.Digester() - - if _, err := digester.Hash().Write(p); err != nil { - // Writes to a Hash should never fail. None of the existing - // hash implementations in the stdlib or hashes vendored - // here can return errors from Write. Having a panic in this - // condition instead of having FromBytes return an error value - // avoids unnecessary error handling paths in all callers. - panic("write to hash function returned error: " + err.Error()) - } - - return digester.Digest() -} - -// FromString digests the string input and returns a Digest. -func (a Algorithm) FromString(s string) Digest { - return a.FromBytes([]byte(s)) -} - -// Validate validates the encoded portion string -func (a Algorithm) Validate(encoded string) error { - r, ok := anchoredEncodedRegexps[a] - if !ok { - return ErrDigestUnsupported - } - // Digests must always be hex-encoded, ensuring that their hex portion will - // always be size*2 - if a.Size()*2 != len(encoded) { - return ErrDigestInvalidLength - } - if r.MatchString(encoded) { - return nil - } - return ErrDigestInvalidFormat -} diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go deleted file mode 100644 index 518b5e7154..0000000000 --- a/vendor/github.com/opencontainers/go-digest/digest.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2019, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digest - -import ( - "fmt" - "hash" - "io" - "regexp" - "strings" -) - -// Digest allows simple protection of hex formatted digest strings, prefixed -// by their algorithm. Strings of type Digest have some guarantee of being in -// the correct format and it provides quick access to the components of a -// digest string.
-// -// The following is an example of the contents of Digest types: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// This makes it possible to abstract the digest behind this type and work -// only in those terms. -type Digest string - -// NewDigest returns a Digest from alg and a hash.Hash object. -func NewDigest(alg Algorithm, h hash.Hash) Digest { - return NewDigestFromBytes(alg, h.Sum(nil)) -} - -// NewDigestFromBytes returns a new digest from the byte contents of p. -// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) -// functions. This is also useful for rebuilding digests from binary -// serializations. -func NewDigestFromBytes(alg Algorithm, p []byte) Digest { - return NewDigestFromEncoded(alg, alg.Encode(p)) -} - -// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded. -func NewDigestFromHex(alg, hex string) Digest { - return NewDigestFromEncoded(Algorithm(alg), hex) -} - -// NewDigestFromEncoded returns a Digest from alg and the encoded digest. -func NewDigestFromEncoded(alg Algorithm, encoded string) Digest { - return Digest(fmt.Sprintf("%s:%s", alg, encoded)) -} - -// DigestRegexp matches valid digest types. -var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`) - -// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. -var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) - -var ( - // ErrDigestInvalidFormat is returned when the digest format is invalid. - ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") - - // ErrDigestInvalidLength is returned when the digest has an invalid length. - ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") - - // ErrDigestUnsupported is returned when the digest algorithm is unsupported. - ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") -) - -// Parse parses s and returns the validated digest object. An error will -// be returned if the format is invalid. -func Parse(s string) (Digest, error) { - d := Digest(s) - return d, d.Validate() -} - -// FromReader consumes the content of rd until io.EOF, returning the canonical digest. -func FromReader(rd io.Reader) (Digest, error) { - return Canonical.FromReader(rd) -} - -// FromBytes digests the input and returns a Digest. -func FromBytes(p []byte) Digest { - return Canonical.FromBytes(p) -} - -// FromString digests the input and returns a Digest. -func FromString(s string) Digest { - return Canonical.FromString(s) -} - -// Validate checks that the contents of d form a valid digest, returning an -// error if not. -func (d Digest) Validate() error { - s := string(d) - i := strings.Index(s, ":") - if i <= 0 || i+1 == len(s) { - return ErrDigestInvalidFormat - } - algorithm, encoded := Algorithm(s[:i]), s[i+1:] - if !algorithm.Available() { - if !DigestRegexpAnchored.MatchString(s) { - return ErrDigestInvalidFormat - } - return ErrDigestUnsupported - } - return algorithm.Validate(encoded) -} - -// Algorithm returns the algorithm portion of the digest. This will panic if -// the underlying digest is not in a valid format. -func (d Digest) Algorithm() Algorithm { - return Algorithm(d[:d.sepIndex()]) -} - -// Verifier returns a writer object that can be used to verify a stream of -// content against the digest. If the digest is invalid, the method will panic.
-func (d Digest) Verifier() Verifier { - return hashVerifier{ - hash: d.Algorithm().Hash(), - digest: d, - } -} - -// Encoded returns the encoded portion of the digest. This will panic if the -// underlying digest is not in a valid format. -func (d Digest) Encoded() string { - return string(d[d.sepIndex()+1:]) -} - -// Hex is deprecated. Please use Digest.Encoded. -func (d Digest) Hex() string { - return d.Encoded() -} - -func (d Digest) String() string { - return string(d) -} - -func (d Digest) sepIndex() int { - i := strings.Index(string(d), ":") - - if i < 0 { - panic(fmt.Sprintf("no ':' separator in digest %q", d)) - } - - return i -} diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go deleted file mode 100644 index ede9077571..0000000000 --- a/vendor/github.com/opencontainers/go-digest/digester.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digest - -import "hash" - -// Digester calculates the digest of written data. Writes should go directly -// to the return value of Hash, while calling Digest will return the current -// value of the digest. -type Digester interface { - Hash() hash.Hash // provides direct access to underlying hash instance. - Digest() Digest -} - -// digester provides a simple digester definition that embeds a hasher. -type digester struct { - alg Algorithm - hash hash.Hash -} - -func (d *digester) Hash() hash.Hash { - return d.hash -} - -func (d *digester) Digest() Digest { - return NewDigest(d.alg, d.hash) -} diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go deleted file mode 100644 index 83d3a936ca..0000000000 --- a/vendor/github.com/opencontainers/go-digest/doc.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2019, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package digest provides a generalized type to opaquely represent message -// digests and their operations within the registry. The Digest type is -// designed to serve as a flexible identifier in a content-addressable system. -// More importantly, it provides tools and wrappers to work with -// hash.Hash-based digests with little effort. 
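The digester.go removed above is the piece that enables incremental hashing; a minimal sketch of that pattern, under the same upstream import-path assumption as the earlier snippet:

package main

import (
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Writes go directly to the embedded hash.Hash; Digest() snapshots the
	// digest of everything written so far.
	digester := digest.Canonical.Digester()
	if _, err := io.Copy(digester.Hash(), strings.NewReader("layer bytes")); err != nil {
		panic(err)
	}
	fmt.Println(digester.Digest())
}
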
-// -// Basics -// -// The format of a digest is simply a string with two parts, dubbed the -// "algorithm" and the "digest", separated by a colon: -// -// <algorithm>:<digest> -// -// An example of a sha256 digest representation follows: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// The "algorithm" portion defines both the hashing algorithm used to calculate -// the digest and the encoding of the resulting digest, which defaults to "hex" -// if not otherwise specified. Currently, all supported algorithms have their -// digests encoded in hex strings. -// -// In the example above, the string "sha256" is the algorithm and the hex bytes -// are the "digest". -// -// Because the Digest type is simply a string, once a valid Digest is -// obtained, comparisons are cheap, quick and simple to express with the -// standard equality operator. -// -// Verification -// -// The main benefit of using the Digest type is simple verification against a -// given digest. The Verifier interface, modeled after the stdlib hash.Hash -// interface, provides a common write sink for digest verification. After -// writing is complete, calling the Verifier.Verified method will indicate -// whether or not the stream of bytes matches the target digest. -// -// Missing Features -// -// In addition to the above, we intend to add the following features to this -// package: -// -// 1. A Digester type that supports write sink digest calculation. -// -// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. -// -package digest diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go deleted file mode 100644 index afef506f46..0000000000 --- a/vendor/github.com/opencontainers/go-digest/verifiers.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2019, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digest - -import ( - "hash" - "io" -) - -// Verifier presents a general verification interface to be used with message -// digests and other byte stream verifications. Users instantiate a Verifier -// from one of the various methods, write the data under test to it, then check -// the result with the Verified method. -type Verifier interface { - io.Writer - - // Verified will return true if the content written to Verifier matches - // the digest.
- Verified() bool -} - -type hashVerifier struct { - digest Digest - hash hash.Hash -} - -func (hv hashVerifier) Write(p []byte) (n int, err error) { - return hv.hash.Write(p) -} - -func (hv hashVerifier) Verified() bool { - return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) -} diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE deleted file mode 100644 index 9fdc20fdb6..0000000000 --- a/vendor/github.com/opencontainers/image-spec/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
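Together, digest.go and verifiers.go implement the parse-then-verify flow the package documentation describes; a hedged sketch, again assuming the upstream import path github.com/opencontainers/go-digest:

package main

import (
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	content := "some blob"

	// Parse validates the "<algorithm>:<encoded>" string form and the algorithm.
	d, err := digest.Parse(digest.FromString(content).String())
	if err != nil {
		panic(err)
	}

	// A Verifier is a write sink: stream the candidate bytes into it, then
	// ask whether they matched the target digest.
	verifier := d.Verifier()
	if _, err := io.Copy(verifier, strings.NewReader(content)); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified()) // true
}
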
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2016 The Linux Foundation. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go deleted file mode 100644 index 581cf7cdfa..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -const ( - // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). - AnnotationCreated = "org.opencontainers.image.created" - - // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). - AnnotationAuthors = "org.opencontainers.image.authors" - - // AnnotationURL is the annotation key for the URL to find more information on the image. - AnnotationURL = "org.opencontainers.image.url" - - // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. - AnnotationDocumentation = "org.opencontainers.image.documentation" - - // AnnotationSource is the annotation key for the URL to get source code for building the image. - AnnotationSource = "org.opencontainers.image.source" - - // AnnotationVersion is the annotation key for the version of the packaged software. - // The version MAY match a label or tag in the source code repository. - // The version MAY be Semantic versioning-compatible. - AnnotationVersion = "org.opencontainers.image.version" - - // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. - AnnotationRevision = "org.opencontainers.image.revision" - - // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. - AnnotationVendor = "org.opencontainers.image.vendor" - - // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. - AnnotationLicenses = "org.opencontainers.image.licenses" - - // AnnotationRefName is the annotation key for the name of the reference for a target. - // SHOULD only be considered valid when on descriptors on `index.json` within image layout. - AnnotationRefName = "org.opencontainers.image.ref.name" - - // AnnotationTitle is the annotation key for the human-readable title of the image. 
- AnnotationTitle = "org.opencontainers.image.title" - - // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. - AnnotationDescription = "org.opencontainers.image.description" - - // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. - AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" - - // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. - AnnotationBaseImageName = "org.opencontainers.image.base.name" -) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go deleted file mode 100644 index 36b0aeb8f1..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "time" - - digest "github.com/opencontainers/go-digest" -) - -// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. -type ImageConfig struct { - // User defines the username or UID which the process in the container should run as. - User string `json:"User,omitempty"` - - // ExposedPorts is a set of ports to expose from a container running this image. - ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` - - // Env is a list of environment variables to be used in a container. - Env []string `json:"Env,omitempty"` - - // Entrypoint defines a list of arguments to use as the command to execute when the container starts. - Entrypoint []string `json:"Entrypoint,omitempty"` - - // Cmd defines the default arguments to the entrypoint of the container. - Cmd []string `json:"Cmd,omitempty"` - - // Volumes is a set of directories describing where the process is likely to write data specific to a container instance. - Volumes map[string]struct{} `json:"Volumes,omitempty"` - - // WorkingDir sets the current working directory of the entrypoint process in the container. - WorkingDir string `json:"WorkingDir,omitempty"` - - // Labels contains arbitrary metadata for the container. - Labels map[string]string `json:"Labels,omitempty"` - - // StopSignal contains the system call signal that will be sent to the container to exit. - StopSignal string `json:"StopSignal,omitempty"` - - // ArgsEscaped - // - // Deprecated: This field is present only for legacy compatibility with - // Docker and should not be used by new image builders. It is used by Docker - // for Windows images to indicate that the `Entrypoint` or `Cmd` (or both) - // contains only a single-element array that is pre-escaped and combined - // into a single string `CommandLine`. If `true` the value in `Entrypoint` or - // `Cmd` should be used as-is to avoid double escaping.
- // https://github.com/opencontainers/image-spec/pull/892 - ArgsEscaped bool `json:"ArgsEscaped,omitempty"` -} - -// RootFS describes the layer content addresses -type RootFS struct { - // Type is the type of the rootfs. - Type string `json:"type"` - - // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. - DiffIDs []digest.Digest `json:"diff_ids"` -} - -// History describes the history of a layer. -type History struct { - // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6. - Created *time.Time `json:"created,omitempty"` - - // CreatedBy is the command which created the layer. - CreatedBy string `json:"created_by,omitempty"` - - // Author is the author of the build point. - Author string `json:"author,omitempty"` - - // Comment is a custom message set when creating the layer. - Comment string `json:"comment,omitempty"` - - // EmptyLayer is used to mark if the history item created a filesystem diff. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Image is the JSON structure which describes some basic information about the image. -// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. -type Image struct { - // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. - Created *time.Time `json:"created,omitempty"` - - // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. - Author string `json:"author,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - Platform - - // Config defines the execution parameters which should be used as a base when running a container using the image. - Config ImageConfig `json:"config,omitempty"` - - // RootFS references the layer content addresses used by the image. - RootFS RootFS `json:"rootfs"` - - // History describes the history of each layer. - History []History `json:"history,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go deleted file mode 100644 index 1881b11814..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016-2022 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import digest "github.com/opencontainers/go-digest" - -// Descriptor describes the disposition of targeted content. -// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype -// when marshalled to JSON. -type Descriptor struct { - // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType"` - - // Digest is the digest of the targeted content.
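A hedged sketch of how the config structures above (Image, ImageConfig, RootFS) serialize to an application/vnd.oci.image.config.v1+json payload; the upstream module path github.com/opencontainers/image-spec and the /ko-app/app entrypoint are assumptions for illustration only:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	digest "github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	created := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	img := v1.Image{
		Created: &created,
		// Platform is an embedded field on Image.
		Platform: v1.Platform{OS: "linux", Architecture: "amd64"},
		Config: v1.ImageConfig{
			// Hypothetical entrypoint; any path works for illustration.
			Entrypoint: []string{"/ko-app/app"},
			Env:        []string{"PATH=/usr/bin:/bin"},
		},
		RootFS: v1.RootFS{
			Type: "layers",
			// DiffIDs are digests of the uncompressed layer tarballs.
			DiffIDs: []digest.Digest{digest.FromString("placeholder layer")},
		},
	}
	b, err := json.Marshal(img)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // an application/vnd.oci.image.config.v1+json payload
}
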
- Digest digest.Digest `json:"digest"` - - // Size specifies the size in bytes of the blob. - Size int64 `json:"size"` - - // URLs specifies a list of URLs from which this object MAY be downloaded - URLs []string `json:"urls,omitempty"` - - // Annotations contains arbitrary metadata relating to the targeted content. - Annotations map[string]string `json:"annotations,omitempty"` - - // Data is an embedding of the targeted content. This is encoded as a base64 - // string when marshalled to JSON (automatically, by encoding/json). If - // present, Data can be used directly to avoid fetching the targeted content. - Data []byte `json:"data,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - // - // This should only be used when referring to a manifest. - Platform *Platform `json:"platform,omitempty"` - - // ArtifactType is the IANA media type of this artifact. - ArtifactType string `json:"artifactType,omitempty"` -} - -// Platform describes the platform which the image in the manifest runs on. -type Platform struct { - // Architecture field specifies the CPU architecture, for example - // `amd64` or `ppc64le`. - Architecture string `json:"architecture"` - - // OS specifies the operating system, for example `linux` or `windows`. - OS string `json:"os"` - - // OSVersion is an optional field specifying the operating system - // version, for example on Windows `10.0.14393.1066`. - OSVersion string `json:"os.version,omitempty"` - - // OSFeatures is an optional field specifying an array of strings, - // each listing a required OS feature (for example on Windows `win32k`). - OSFeatures []string `json:"os.features,omitempty"` - - // Variant is an optional field specifying a variant of the CPU, for - // example `v7` to specify ARMv7 when architecture is `arm`. - Variant string `json:"variant,omitempty"` -} - -// DescriptorEmptyJSON is the descriptor of a blob with content of `{}`. -var DescriptorEmptyJSON = Descriptor{ - MediaType: MediaTypeEmptyJSON, - Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, - Size: 2, - Data: []byte(`{}`), -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go deleted file mode 100644 index e2bed9d4e4..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import "github.com/opencontainers/image-spec/specs-go" - -// Index references manifests for various platforms. -// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. -type Index struct { - specs.Versioned - - // MediaType specifies the type of this document data structure e.g. 
`application/vnd.oci.image.index.v1+json` - MediaType string `json:"mediaType,omitempty"` - - // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. - ArtifactType string `json:"artifactType,omitempty"` - - // Manifests references platform specific manifests. - Manifests []Descriptor `json:"manifests"` - - // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. - Subject *Descriptor `json:"subject,omitempty"` - - // Annotations contains arbitrary metadata for the image index. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go deleted file mode 100644 index c5503cb305..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -const ( - // ImageLayoutFile is the file name containing ImageLayout in an OCI Image Layout - ImageLayoutFile = "oci-layout" - // ImageLayoutVersion is the version of ImageLayout - ImageLayoutVersion = "1.0.0" - // ImageIndexFile is the file name of the entry point for references and descriptors in an OCI Image Layout - ImageIndexFile = "index.json" - // ImageBlobsDir is the directory name containing content addressable blobs in an OCI Image Layout - ImageBlobsDir = "blobs" -) - -// ImageLayout is the structure in the "oci-layout" file, found in the root -// of an OCI Image-layout directory. -type ImageLayout struct { - Version string `json:"imageLayoutVersion"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go deleted file mode 100644 index 26fec52a6b..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016-2022 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import "github.com/opencontainers/image-spec/specs-go" - -// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. 
-type Manifest struct { - specs.Versioned - - // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` - MediaType string `json:"mediaType,omitempty"` - - // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. - ArtifactType string `json:"artifactType,omitempty"` - - // Config references a configuration object for a container, by digest. - // The referenced configuration object is a JSON blob that the runtime uses to set up the container. - Config Descriptor `json:"config"` - - // Layers is an indexed list of layers referenced by the manifest. - Layers []Descriptor `json:"layers"` - - // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. - Subject *Descriptor `json:"subject,omitempty"` - - // Annotations contains arbitrary metadata for the image manifest. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go deleted file mode 100644 index ce8313e796..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -const ( - // MediaTypeDescriptor specifies the media type for a content descriptor. - MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" - - // MediaTypeLayoutHeader specifies the media type for the oci-layout. - MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" - - // MediaTypeImageIndex specifies the media type for an image index. - MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" - - // MediaTypeImageManifest specifies the media type for an image manifest. - MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" - - // MediaTypeImageConfig specifies the media type for the image configuration. - MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - - // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value "{}". - MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" -) - -const ( - // MediaTypeImageLayer is the media type used for layers referenced by the manifest. - MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" - - // MediaTypeImageLayerGzip is the media type used for gzipped layers - // referenced by the manifest. - MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" - - // MediaTypeImageLayerZstd is the media type used for zstd compressed - // layers referenced by the manifest. - MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" -) - -// Non-distributable layer media-types. 
-// -// Deprecated: Non-distributable layers are deprecated, and not recommended -// for future use. Implementations SHOULD NOT produce new non-distributable -// layers. -// https://github.com/opencontainers/image-spec/pull/965 -const ( - // MediaTypeImageLayerNonDistributable is the media type for layers referenced by - // the manifest but with distribution restrictions. - // - // Deprecated: Non-distributable layers are deprecated, and not recommended - // for future use. Implementations SHOULD NOT produce new non-distributable - // layers. - // https://github.com/opencontainers/image-spec/pull/965 - MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" - - // MediaTypeImageLayerNonDistributableGzip is the media type for - // gzipped layers referenced by the manifest but with distribution - // restrictions. - // - // Deprecated: Non-distributable layers are deprecated, and not recommended - // for future use. Implementations SHOULD NOT produce new non-distributable - // layers. - // https://github.com/opencontainers/image-spec/pull/965 - MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" - - // MediaTypeImageLayerNonDistributableZstd is the media type for zstd - // compressed layers referenced by the manifest but with distribution - // restrictions. - // - // Deprecated: Non-distributable layers are deprecated, and not recommended - // for future use. Implementations SHOULD NOT produce new non-distributable - // layers. - // https://github.com/opencontainers/image-spec/pull/965 - MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" -) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go deleted file mode 100644 index 7069ae44d7..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package specs - -import "fmt" - -const ( - // VersionMajor is for API-incompatible changes - VersionMajor = 1 - // VersionMinor is for functionality added in a backwards-compatible manner - VersionMinor = 1 - // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 - - // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" -) - -// Version is the specification version that the package types support.
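A hedged sketch of how the manifest, descriptor, media-type, and annotation definitions above compose into a minimal image manifest; the upstream module path github.com/opencontainers/image-spec is assumed, and the digests are computed from placeholder bytes:

package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	configBytes := []byte(`{}`)
	layerBytes := []byte("placeholder layer bytes")

	m := v1.Manifest{
		// Versioned is an embedded field carrying schemaVersion.
		Versioned: specs.Versioned{SchemaVersion: 2},
		MediaType: v1.MediaTypeImageManifest,
		Config: v1.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
			Digest:    digest.FromBytes(configBytes),
			Size:      int64(len(configBytes)),
		},
		Layers: []v1.Descriptor{{
			MediaType: v1.MediaTypeImageLayerGzip,
			Digest:    digest.FromBytes(layerBytes),
			Size:      int64(len(layerBytes)),
		}},
		Annotations: map[string]string{
			v1.AnnotationCreated: "2024-01-01T00:00:00Z",
		},
	}
	b, err := json.MarshalIndent(m, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
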
-var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go deleted file mode 100644 index 58a1510f33..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package specs - -// Versioned provides a struct with the manifest schemaVersion and mediaType. -// Incoming content with unknown schema version can be decoded against this -// struct to check the version. -type Versioned struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` -} diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore deleted file mode 100644 index 7b5883475d..0000000000 --- a/vendor/github.com/pelletier/go-toml/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -cmd/tomll/tomll -cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore deleted file mode 100644 index e6ba63a5c5..0000000000 --- a/vendor/github.com/pelletier/go-toml/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -test_program/test_program_bin -fuzz/ -cmd/tomll/tomll -cmd/tomljson/tomljson -cmd/tomltestgen/tomltestgen diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md deleted file mode 100644 index 98b9893d37..0000000000 --- a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md +++ /dev/null @@ -1,132 +0,0 @@ -## Contributing - -Thank you for your interest in go-toml! We appreciate you considering -contributing to go-toml! - -The main goal of the project is to provide an easy-to-use TOML -implementation for Go that gets the job done and gets out of your way – -dealing with TOML is probably not the central piece of your project. - -Since go-toml has a single maintainer, time is scarce. All help, big or -small, is more than welcome! - -### Ask questions - -Any question you may have, somebody else might have it too. Always feel -free to ask them on the [issues tracker][issues-tracker]. We will try to -answer them as clearly and quickly as possible, time permitting. - -Asking questions also helps us identify areas where the documentation needs -improvement, or new features that weren't envisioned before. Sometimes, a -seemingly innocent question leads to the fix of a bug. Don't hesitate and -ask away! - -### Improve the documentation - -The best way to share your knowledge and experience with go-toml is to -improve the documentation. Fix a typo, clarify an interface, add an -example, anything goes!
- -The documentation is present in the [README][readme] and throughout the -source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a -change to the documentation, create a pull request with your proposed -changes. For simple changes like that, the easiest way to go is probably -the "Fork this project and edit the file" button on Github, displayed at -the top right of the file. Unless it's a trivial change (for example a -typo), provide a little bit of context in your pull request description or -commit message. - -### Report a bug - -Found a bug! Sorry to hear that :(. Help us and others track them down and -fix them by reporting it. [File a new bug report][bug-report] on the [issues -tracker][issues-tracker]. The template should provide enough guidance on -what to include. When in doubt: add more details! By reducing ambiguity and -providing more information, it decreases back and forth and saves everyone -time. - -### Code changes - -Want to contribute a patch? Very happy to hear that! - -First, some high-level rules: - -* A short proposal with some POC code is better than a lengthy piece of - text with no code. Code speaks louder than words. -* No backward-incompatible patch will be accepted unless discussed. - Sometimes it's hard, and Go's lack of versioning by default does not - help, but we try not to break people's programs unless we absolutely have - to. -* If you are writing a new feature or extending an existing one, make sure - to write some documentation. -* Bug fixes need to be accompanied by regression tests. -* New code needs to be tested. -* Your commit messages need to explain why the change is needed, even if - already included in the PR description. - -It does sound like a lot, but those best practices are here to save time -overall and continuously improve the quality of the project, which is -something everyone benefits from. - -#### Get started - -The fairly standard code contribution process looks like this: - -1. [Fork the project][fork]. -2. Make your changes, commit on any branch you like. -3. [Open up a pull request][pull-request]. -4. Review, with potential requests for changes. -5. Merge. You're in! - -Feel free to ask for help! You can create draft pull requests to gather -some early feedback! - -#### Run the tests - -You can run tests for go-toml using Go's test tool: `go test ./...`. -When you create a pull request, all tests will be run on Linux on a few Go -versions (Travis CI), and on Windows using the latest Go version -(AppVeyor). - -#### Style - -Try to look around and follow the same format and structure as the rest of -the code. We enforce using `go fmt` on the whole code base. - ---- - -### Maintainers-only - -#### Merge pull request - -Checklist: - -* Passing CI. -* Does not introduce backward-incompatible changes (unless discussed). -* Has relevant doc changes. -* Has relevant unit tests. - -1. Merge using "squash and merge". -2. Make sure to edit the commit message to keep all the useful information - nice and clean. -3. Make sure the commit title is clear and contains the PR number (#123). - -#### New release - -1. Go to [releases][releases]. Click on "X commits to master since this - release". -2. Make note of all the changes. Look for backward incompatible changes, - new features, and bug fixes. -3. Pick the new version using the above and semver. -4. Create a [new release][new-release]. -5. Follow the same format as [1.1.0][release-110].
- -[issues-tracker]: https://github.com/pelletier/go-toml/issues -[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md -[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml -[readme]: ./README.md -[fork]: https://help.github.com/articles/fork-a-repo -[pull-request]: https://help.github.com/en/articles/creating-a-pull-request -[releases]: https://github.com/pelletier/go-toml/releases -[new-release]: https://github.com/pelletier/go-toml/releases/new -[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile deleted file mode 100644 index fffdb01666..0000000000 --- a/vendor/github.com/pelletier/go-toml/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM golang:1.12-alpine3.9 as builder -WORKDIR /go/src/github.com/pelletier/go-toml -COPY . . -ENV CGO_ENABLED=0 -ENV GOOS=linux -RUN go install ./... - -FROM scratch -COPY --from=builder /go/bin/tomll /usr/bin/tomll -COPY --from=builder /go/bin/tomljson /usr/bin/tomljson -COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE deleted file mode 100644 index f414553c21..0000000000 --- a/vendor/github.com/pelletier/go-toml/LICENSE +++ /dev/null @@ -1,247 +0,0 @@ -The bulk of github.com/pelletier/go-toml is distributed under the MIT license -(see below), with the exception of localtime.go and localtime.test.go. -Those two files have been copied over from Google's civil library at revision -ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache -2.0 license (see below). - - -github.com/pelletier/go-toml: - - -The MIT License (MIT) - -Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -localtime.go, localtime_test.go: - -Originals: - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go -Changes: - * Renamed files from civil* to localtime*. - * Package changed from civil to toml. - * 'Local' prefix added to all structs. 
-License: - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile deleted file mode 100644 index 9e4503aea6..0000000000 --- a/vendor/github.com/pelletier/go-toml/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -export CGO_ENABLED=0 -go := go -go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) -go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) - -out.tools := tomll tomljson jsontoml -out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) -sources := $(wildcard **/*.go) - - -.PHONY: -tools: $(out.tools) - -$(out.tools): $(sources) - GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ - -.PHONY: -dist: $(out.dist) - -$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % - if [ "$(go.goos)" = "windows" ]; then \ - tar -cJf $@ $^.exe; \ - else \ - tar -cJf $@ $^; \ - fi - -.PHONY: -clean: - rm -rf $(out.tools) $(out.dist) diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 041cdc4a2f..0000000000 --- a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,5 +0,0 @@ -**Issue:** add link to pelletier/go-toml issue here - -Explanation of what this pull request does. - -More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md deleted file mode 100644 index 7399e04bf6..0000000000 --- a/vendor/github.com/pelletier/go-toml/README.md +++ /dev/null @@ -1,176 +0,0 @@ -# go-toml - -Go library for the [TOML](https://toml.io/) format. - -This library supports TOML version -[v1.0.0-rc.3](https://toml.io/en/v1.0.0-rc.3) - -[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml.svg)](https://pkg.go.dev/github.com/pelletier/go-toml) -[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) -[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) -[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) -[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) - - -## Development status - -**ℹ️ Consider go-toml v2!** - -The next version of go-toml is in [active development][v2-dev], and -[nearing completion][v2-map]. - -Though technically in beta, v2 is already more tested, [fixes bugs][v1-bugs], -and [much faster][v2-bench]. 
If you only need reading and writing TOML documents -(majority of cases), those features are implemented and the API unlikely to -change. - -The remaining features will be added shortly. While pull-requests are welcome on -v1, no active development is expected on it. When v2.0.0 is released, v1 will be -deprecated. - -👉 [go-toml v2][v2] - -[v2]: https://github.com/pelletier/go-toml/tree/v2 -[v2-map]: https://github.com/pelletier/go-toml/discussions/506 -[v2-dev]: https://github.com/pelletier/go-toml/tree/v2 -[v1-bugs]: https://github.com/pelletier/go-toml/issues?q=is%3Aissue+is%3Aopen+label%3Av2-fixed -[v2-bench]: https://github.com/pelletier/go-toml/tree/v2#benchmarks - -## Features - -Go-toml provides the following features for using data parsed from TOML documents: - -* Load TOML documents from files and string data -* Easily navigate TOML structure using Tree -* Marshaling and unmarshaling to and from data structures -* Line & column position data for all parsed elements -* [Query support similar to JSON-Path](query/) -* Syntax errors contain line and column numbers - -## Import - -```go -import "github.com/pelletier/go-toml" -``` - -## Usage example - -Read a TOML document: - -```go -config, _ := toml.Load(` -[postgres] -user = "pelletier" -password = "mypassword"`) -// retrieve data directly -user := config.Get("postgres.user").(string) - -// or using an intermediate object -postgresConfig := config.Get("postgres").(*toml.Tree) -password := postgresConfig.Get("password").(string) -``` - -Or use Unmarshal: - -```go -type Postgres struct { - User string - Password string -} -type Config struct { - Postgres Postgres -} - -doc := []byte(` -[Postgres] -User = "pelletier" -Password = "mypassword"`) - -config := Config{} -toml.Unmarshal(doc, &config) -fmt.Println("user=", config.Postgres.User) -``` - -Or use a query: - -```go -// use a query to gather elements without walking the tree -q, _ := query.Compile("$..[user,password]") -results := q.Execute(config) -for ii, item := range results.Values() { - fmt.Printf("Query result %d: %v\n", ii, item) -} -``` - -## Documentation - -The documentation and additional examples are available at -[pkg.go.dev](https://pkg.go.dev/github.com/pelletier/go-toml). - -## Tools - -Go-toml provides three handy command line tools: - -* `tomll`: Reads TOML files and lints them. - - ``` - go install github.com/pelletier/go-toml/cmd/tomll - tomll --help - ``` -* `tomljson`: Reads a TOML file and outputs its JSON representation. - - ``` - go install github.com/pelletier/go-toml/cmd/tomljson - tomljson --help - ``` - - * `jsontoml`: Reads a JSON file and outputs a TOML representation. - - ``` - go install github.com/pelletier/go-toml/cmd/jsontoml - jsontoml --help - ``` - -### Docker image - -Those tools are also available as a Docker image from -[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to -use `tomljson`: - -``` -docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml -``` - -Only master (`latest`) and tagged versions are published to dockerhub. You -can build your own image as usual: - -``` -docker build -t go-toml . -``` - -## Contribute - -Feel free to report bugs and patches using GitHub's pull requests system on -[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be -much appreciated! 
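
The usage snippets above discard error values for brevity. Below is a minimal, error-checked sketch of the same flow (the `postgres` table and field names are carried over from the examples above; only `LoadBytes`, `Tree.Get`, and `Unmarshal` from the v1 API are assumed):

```go
package main

import (
	"fmt"
	"log"

	"github.com/pelletier/go-toml"
)

type Config struct {
	Postgres struct {
		User     string `toml:"user"`
		Password string `toml:"password"`
	} `toml:"postgres"`
}

func main() {
	doc := []byte(`
[postgres]
user = "pelletier"
password = "mypassword"`)

	// Tree-based access: checking the LoadBytes error surfaces syntax
	// problems, with line and column positions, instead of panicking
	// later on a nil tree.
	tree, err := toml.LoadBytes(doc)
	if err != nil {
		log.Fatalf("parse: %v", err)
	}
	fmt.Println("user =", tree.Get("postgres.user").(string))

	// Struct-based access: checking the Unmarshal error avoids silently
	// reading zero values from a malformed document.
	var cfg Config
	if err := toml.Unmarshal(doc, &cfg); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	fmt.Println("password =", cfg.Postgres.Password)
}
```
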
- -### Run tests - -`go test ./...` - -### Fuzzing - -The script `./fuzz.sh` is available to -run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. - -## Versioning - -Go-toml follows [Semantic Versioning](http://semver.org/). The supported version -of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of -this document. The last two major versions of Go are supported -(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). - -## License - -The MIT License (MIT) + Apache 2.0. Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/SECURITY.md b/vendor/github.com/pelletier/go-toml/SECURITY.md deleted file mode 100644 index b2f21cfc92..0000000000 --- a/vendor/github.com/pelletier/go-toml/SECURITY.md +++ /dev/null @@ -1,19 +0,0 @@ -# Security Policy - -## Supported Versions - -Use this section to tell people about which versions of your project are -currently being supported with security updates. - -| Version | Supported | -| ---------- | ------------------ | -| Latest 2.x | :white_check_mark: | -| All 1.x | :x: | -| All 0.x | :x: | - -## Reporting a Vulnerability - -Email a vulnerability report to `security@pelletier.codes`. Make sure to include -as many details as possible to reproduce the vulnerability. This is a -side-project: I will try to get back to you as quickly as possible, time -permitting in my personal life. Providing a working patch helps very much! diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml deleted file mode 100644 index 4af198b4d9..0000000000 --- a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml +++ /dev/null @@ -1,188 +0,0 @@ -trigger: -- master - -stages: -- stage: run_checks - displayName: "Check" - dependsOn: [] - jobs: - - job: fmt - displayName: "fmt" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - task: Go@0 - displayName: "go fmt ./..." - inputs: - command: 'custom' - customCommand: 'fmt' - arguments: './...' 
- - job: coverage - displayName: "coverage" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - task: Go@0 - displayName: "Generate coverage" - inputs: - command: 'test' - arguments: "-race -coverprofile=coverage.txt -covermode=atomic" - - task: Bash@3 - inputs: - targetType: 'inline' - script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' - env: - CODECOV_TOKEN: $(CODECOV_TOKEN) - - job: benchmark - displayName: "benchmark" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" - - task: Bash@3 - inputs: - filePath: './benchmark.sh' - arguments: "master $(Build.Repository.Uri)" - - - job: go_unit_tests - displayName: "unit tests" - strategy: - matrix: - linux 1.16: - goVersion: '1.16' - imageName: 'ubuntu-latest' - mac 1.16: - goVersion: '1.16' - imageName: 'macOS-latest' - windows 1.16: - goVersion: '1.16' - imageName: 'windows-latest' - linux 1.15: - goVersion: '1.15' - imageName: 'ubuntu-latest' - mac 1.15: - goVersion: '1.15' - imageName: 'macOS-latest' - windows 1.15: - goVersion: '1.15' - imageName: 'windows-latest' - pool: - vmImage: $(imageName) - steps: - - task: GoTool@0 - displayName: "Install Go $(goVersion)" - inputs: - version: $(goVersion) - - task: Go@0 - displayName: "go test ./..." - inputs: - command: 'test' - arguments: './...' -- stage: build_binaries - displayName: "Build binaries" - dependsOn: run_checks - jobs: - - job: build_binary - displayName: "Build binary" - strategy: - matrix: - linux_amd64: - GOOS: linux - GOARCH: amd64 - darwin_amd64: - GOOS: darwin - GOARCH: amd64 - windows_amd64: - GOOS: windows - GOARCH: amd64 - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go" - inputs: - version: 1.16 - - task: Bash@3 - inputs: - targetType: inline - script: "make dist" - env: - go.goos: $(GOOS) - go.goarch: $(GOARCH) - - task: CopyFiles@2 - inputs: - sourceFolder: '$(Build.SourcesDirectory)' - contents: '*.tar.xz' - TargetFolder: '$(Build.ArtifactStagingDirectory)' - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: binaries -- stage: build_binaries_manifest - displayName: "Build binaries manifest" - dependsOn: build_binaries - jobs: - - job: build_manifest - displayName: "Build binaries manifest" - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: 'current' - downloadType: 'single' - artifactName: 'binaries' - downloadPath: '$(Build.SourcesDirectory)' - - task: Bash@3 - inputs: - targetType: inline - script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: manifest - -- stage: build_docker_image - displayName: "Build Docker image" - dependsOn: run_checks - jobs: - - job: build - displayName: "Build" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - command: 'build' - Dockerfile: 'Dockerfile' - buildContext: '.' 
- addPipelineData: false - -- stage: publish_docker_image - displayName: "Publish Docker image" - dependsOn: build_docker_image - condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) - jobs: - - job: publish - displayName: "Publish" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - containerRegistry: 'DockerHub' - repository: 'pelletier/go-toml' - command: 'buildAndPush' - Dockerfile: 'Dockerfile' - buildContext: '.' - tags: 'latest' diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh deleted file mode 100644 index a69d3040fa..0000000000 --- a/vendor/github.com/pelletier/go-toml/benchmark.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -set -ex - -reference_ref=${1:-master} -reference_git=${2:-.} - -if ! `hash benchstat 2>/dev/null`; then - echo "Installing benchstat" - go get golang.org/x/perf/cmd/benchstat -fi - -tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` -ref_tempdir="${tempdir}/ref" -ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" -local_benchmark="`pwd`/benchmark-local.txt" - -echo "=== ${reference_ref} (${ref_tempdir})" -git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null -pushd ${ref_tempdir} >/dev/null -git checkout ${reference_ref} >/dev/null 2>/dev/null -go test -bench=. -benchmem | tee ${ref_benchmark} -cd benchmark -go test -bench=. -benchmem | tee -a ${ref_benchmark} -popd >/dev/null - -echo "" -echo "=== local" -go test -bench=. -benchmem | tee ${local_benchmark} -cd benchmark -go test -bench=. -benchmem | tee -a ${local_benchmark} - -echo "" -echo "=== diff" -benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go deleted file mode 100644 index a1406a32b3..0000000000 --- a/vendor/github.com/pelletier/go-toml/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Package toml is a TOML parser and manipulation library. -// -// This version supports the specification as described in -// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md -// -// Marshaling -// -// Go-toml can marshal and unmarshal TOML documents from and to data -// structures. -// -// TOML document as a tree -// -// Go-toml can operate on a TOML document as a tree. Use one of the Load* -// functions to parse TOML data and obtain a Tree instance, then one of its -// methods to manipulate the tree. -// -// JSONPath-like queries -// -// The package github.com/pelletier/go-toml/query implements a system -// similar to JSONPath to quickly retrieve elements of a TOML document using a -// single expression. See the package documentation for more information. -// -package toml diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml deleted file mode 100644 index 780d9c68f2..0000000000 --- a/vendor/github.com/pelletier/go-toml/example-crlf.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. 
TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml deleted file mode 100644 index f45bf88b8f..0000000000 --- a/vendor/github.com/pelletier/go-toml/example.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go deleted file mode 100644 index 14570c8d35..0000000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build gofuzz - -package toml - -func Fuzz(data []byte) int { - tree, err := LoadBytes(data) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - str, err := tree.ToTomlString() - if err != nil { - if str != "" { - panic(`str must be "" if there is an error`) - } - panic(err) - } - - tree, err = Load(str) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - return 1 -} diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh deleted file mode 100644 index 3204b4c446..0000000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! /bin/sh -set -eu - -go get github.com/dvyukov/go-fuzz/go-fuzz -go get github.com/dvyukov/go-fuzz/go-fuzz-build - -if [ ! -e toml-fuzz.zip ]; then - go-fuzz-build github.com/pelletier/go-toml -fi - -rm -fr fuzz -mkdir -p fuzz/corpus -cp *.toml fuzz/corpus - -go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go deleted file mode 100644 index e091500b24..0000000000 --- a/vendor/github.com/pelletier/go-toml/keysparsing.go +++ /dev/null @@ -1,112 +0,0 @@ -// Parsing keys handling both bare and quoted keys. - -package toml - -import ( - "errors" - "fmt" -) - -// Convert the bare key group string to an array. -// The input supports double quotation and single quotation, -// but escape sequences are not supported. Lexers must unescape them beforehand. 
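// For illustration (a sketch of the behavior implemented below, not text
// from the original file): parseKey(`a."b.c".d`) yields the groups
// ["a", "b.c", "d"]. Dots inside quoted segments are preserved as part of
// the group, whitespace around the separating dots is skipped, and an
// empty input returns an "empty key" error.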
-func parseKey(key string) ([]string, error) { - runes := []rune(key) - var groups []string - - if len(key) == 0 { - return nil, errors.New("empty key") - } - - idx := 0 - for idx < len(runes) { - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip leading whitespace - } - if idx >= len(runes) { - break - } - r := runes[idx] - if isValidBareChar(r) { - // parse bare key - startIdx := idx - endIdx := -1 - idx++ - for idx < len(runes) { - r = runes[idx] - if isValidBareChar(r) { - idx++ - } else if r == '.' { - endIdx = idx - break - } else if isSpace(r) { - endIdx = idx - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip trailing whitespace - } - if idx < len(runes) && runes[idx] != '.' { - return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) - } - break - } else { - return nil, fmt.Errorf("invalid bare key character: %c", r) - } - } - if endIdx == -1 { - endIdx = idx - } - groups = append(groups, string(runes[startIdx:endIdx])) - } else if r == '\'' { - // parse single quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed single-quoted key") - } - r = runes[idx] - if r == '\'' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '"' { - // parse double quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed double-quoted key") - } - r = runes[idx] - if r == '"' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '.' { - idx++ - if idx >= len(runes) { - return nil, fmt.Errorf("unexpected end of key") - } - r = runes[idx] - if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { - return nil, fmt.Errorf("expecting key part after dot") - } - } else { - return nil, fmt.Errorf("invalid key character: %c", r) - } - } - if len(groups) == 0 { - return nil, fmt.Errorf("empty key") - } - return groups, nil -} - -func isValidBareChar(r rune) bool { - return isAlphanumeric(r) || r == '-' || isDigit(r) -} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go deleted file mode 100644 index 313908e3e0..0000000000 --- a/vendor/github.com/pelletier/go-toml/lexer.go +++ /dev/null @@ -1,1031 +0,0 @@ -// TOML lexer. 
-// -// Written using the principles developed by Rob Pike in -// http://www.youtube.com/watch?v=HxaD_trXwRE - -package toml - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" -) - -// Define state functions -type tomlLexStateFn func() tomlLexStateFn - -// Define lexer -type tomlLexer struct { - inputIdx int - input []rune // Textual source - currentTokenStart int - currentTokenStop int - tokens []token - brackets []rune - line int - col int - endbufferLine int - endbufferCol int -} - -// Basic read operations on input - -func (l *tomlLexer) read() rune { - r := l.peek() - if r == '\n' { - l.endbufferLine++ - l.endbufferCol = 1 - } else { - l.endbufferCol++ - } - l.inputIdx++ - return r -} - -func (l *tomlLexer) next() rune { - r := l.read() - - if r != eof { - l.currentTokenStop++ - } - return r -} - -func (l *tomlLexer) ignore() { - l.currentTokenStart = l.currentTokenStop - l.line = l.endbufferLine - l.col = l.endbufferCol -} - -func (l *tomlLexer) skip() { - l.next() - l.ignore() -} - -func (l *tomlLexer) fastForward(n int) { - for i := 0; i < n; i++ { - l.next() - } -} - -func (l *tomlLexer) emitWithValue(t tokenType, value string) { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: t, - val: value, - }) - l.ignore() -} - -func (l *tomlLexer) emit(t tokenType) { - l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) -} - -func (l *tomlLexer) peek() rune { - if l.inputIdx >= len(l.input) { - return eof - } - return l.input[l.inputIdx] -} - -func (l *tomlLexer) peekString(size int) string { - maxIdx := len(l.input) - upperIdx := l.inputIdx + size // FIXME: potential overflow - if upperIdx > maxIdx { - upperIdx = maxIdx - } - return string(l.input[l.inputIdx:upperIdx]) -} - -func (l *tomlLexer) follow(next string) bool { - return next == l.peekString(len(next)) -} - -// Error management - -func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: tokenError, - val: fmt.Sprintf(format, args...), - }) - return nil -} - -// State functions - -func (l *tomlLexer) lexVoid() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '}': // after '{' - return l.lexRightCurlyBrace - case '[': - return l.lexTableKey - case '#': - return l.lexComment(l.lexVoid) - case '=': - return l.lexEqual - case '\r': - fallthrough - case '\n': - l.skip() - continue - } - - if isSpace(next) { - l.skip() - } - - if isKeyStartChar(next) { - return l.lexKey - } - - if next == eof { - l.next() - break - } - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexRvalue() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '.': - return l.errorf("cannot start float with a dot") - case '=': - return l.lexEqual - case '[': - return l.lexLeftBracket - case ']': - return l.lexRightBracket - case '{': - return l.lexLeftCurlyBrace - case '}': - return l.lexRightCurlyBrace - case '#': - return l.lexComment(l.lexRvalue) - case '"': - return l.lexString - case '\'': - return l.lexLiteralString - case ',': - return l.lexComma - case '\r': - fallthrough - case '\n': - l.skip() - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { - return l.lexRvalue - } - return l.lexVoid - } - - if l.follow("true") { - return l.lexTrue - } - - if l.follow("false") { - return l.lexFalse - } - - if l.follow("inf") { - return l.lexInf - } - - if l.follow("nan") { - return l.lexNan - } - - if isSpace(next) { - l.skip() - continue - 
} - - if next == eof { - l.next() - break - } - - if next == '+' || next == '-' { - return l.lexNumber - } - - if isDigit(next) { - return l.lexDateTimeOrNumber - } - - return l.errorf("no value can start with %c", next) - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexDateTimeOrNumber() tomlLexStateFn { - // Could be either a date/time, or a digit. - // The options for date/times are: - // YYYY-... => date or date-time - // HH:... => time - // Anything else should be a number. - - lookAhead := l.peekString(5) - if len(lookAhead) < 3 { - return l.lexNumber() - } - - for idx, r := range lookAhead { - if !isDigit(r) { - if idx == 2 && r == ':' { - return l.lexDateTimeOrTime() - } - if idx == 4 && r == '-' { - return l.lexDateTimeOrTime() - } - return l.lexNumber() - } - } - return l.lexNumber() -} - -func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenLeftCurlyBrace) - l.brackets = append(l.brackets, '{') - return l.lexVoid -} - -func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenRightCurlyBrace) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { - return l.errorf("cannot have '}' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -func (l *tomlLexer) lexDateTimeOrTime() tomlLexStateFn { - // Example matches: - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 1979-05-27 - // 07:32:00 - // 00:32:00.999999 - - // we already know those two are digits - l.next() - l.next() - - // Got 2 digits. At that point it could be either a time or a date(-time). - - r := l.next() - if r == ':' { - return l.lexTime() - } - - return l.lexDateTime() -} - -func (l *tomlLexer) lexDateTime() tomlLexStateFn { - // This state accepts an offset date-time, a local date-time, or a local date. 
- // - // v--- cursor - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 1979-05-27 - - // date - - // already checked by lexRvalue - l.next() // digit - l.next() // - - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid month digit in date: %c", r) - } - } - - r := l.next() - if r != '-' { - return l.errorf("expected - to separate month of a date, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid day digit in date: %c", r) - } - } - - l.emit(tokenLocalDate) - - r = l.peek() - - if r == eof { - - return l.lexRvalue - } - - if r != ' ' && r != 'T' { - return l.errorf("incorrect date/time separation character: %c", r) - } - - if r == ' ' { - lookAhead := l.peekString(3)[1:] - if len(lookAhead) < 2 { - return l.lexRvalue - } - for _, r := range lookAhead { - if !isDigit(r) { - return l.lexRvalue - } - } - } - - l.skip() // skip the T or ' ' - - // time - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid hour digit in time: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time hour/minute separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time minute/second separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid second digit in time: %c", r) - } - } - - r = l.peek() - if r == '.' { - l.next() - r := l.next() - if !isDigit(r) { - return l.errorf("expected at least one digit in time's fraction, not %c", r) - } - - for { - r := l.peek() - if !isDigit(r) { - break - } - l.next() - } - } - - l.emit(tokenLocalTime) - - return l.lexTimeOffset - -} - -func (l *tomlLexer) lexTimeOffset() tomlLexStateFn { - // potential offset - - // Z - // -07:00 - // +07:00 - // nothing - - r := l.peek() - - if r == 'Z' { - l.next() - l.emit(tokenTimeOffset) - } else if r == '+' || r == '-' { - l.next() - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid hour digit in time offset: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time offset hour/minute separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time offset: %c", r) - } - } - - l.emit(tokenTimeOffset) - } - - return l.lexRvalue -} - -func (l *tomlLexer) lexTime() tomlLexStateFn { - // v--- cursor - // 07:32:00 - // 00:32:00.999999 - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time: %c", r) - } - } - - r := l.next() - if r != ':' { - return l.errorf("time minute/second separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid second digit in time: %c", r) - } - } - - r = l.peek() - if r == '.' 
{ - l.next() - r := l.next() - if !isDigit(r) { - return l.errorf("expected at least one digit in time's fraction, not %c", r) - } - - for { - r := l.peek() - if !isDigit(r) { - break - } - l.next() - } - } - - l.emit(tokenLocalTime) - return l.lexRvalue - -} - -func (l *tomlLexer) lexTrue() tomlLexStateFn { - l.fastForward(4) - l.emit(tokenTrue) - return l.lexRvalue -} - -func (l *tomlLexer) lexFalse() tomlLexStateFn { - l.fastForward(5) - l.emit(tokenFalse) - return l.lexRvalue -} - -func (l *tomlLexer) lexInf() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenInf) - return l.lexRvalue -} - -func (l *tomlLexer) lexNan() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenNan) - return l.lexRvalue -} - -func (l *tomlLexer) lexEqual() tomlLexStateFn { - l.next() - l.emit(tokenEqual) - return l.lexRvalue -} - -func (l *tomlLexer) lexComma() tomlLexStateFn { - l.next() - l.emit(tokenComma) - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { - return l.lexVoid - } - return l.lexRvalue -} - -// Parse the key and emits its value without escape sequences. -// bare keys, basic string keys and literal string keys are supported. -func (l *tomlLexer) lexKey() tomlLexStateFn { - var sb strings.Builder - - for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { - if r == '"' { - l.next() - str, err := l.lexStringAsString(`"`, false, true) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("\"") - sb.WriteString(str) - sb.WriteString("\"") - l.next() - continue - } else if r == '\'' { - l.next() - str, err := l.lexLiteralStringAsString(`'`, false) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("'") - sb.WriteString(str) - sb.WriteString("'") - l.next() - continue - } else if r == '\n' { - return l.errorf("keys cannot contain new lines") - } else if isSpace(r) { - var str strings.Builder - str.WriteString(" ") - - // skip trailing whitespace - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - // break loop if not a dot - if r != '.' { - break - } - str.WriteString(".") - // skip trailing whitespace after dot - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - sb.WriteString(str.String()) - continue - } else if r == '.' 
{ - // skip - } else if !isValidBareChar(r) { - return l.errorf("keys cannot contain %c character", r) - } - sb.WriteRune(r) - l.next() - } - l.emitWithValue(tokenKey, sb.String()) - return l.lexVoid -} - -func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { - return func() tomlLexStateFn { - for next := l.peek(); next != '\n' && next != eof; next = l.peek() { - if next == '\r' && l.follow("\r\n") { - break - } - l.next() - } - l.ignore() - return previousState - } -} - -func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { - l.next() - l.emit(tokenLeftBracket) - l.brackets = append(l.brackets, '[') - return l.lexRvalue -} - -func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - // find end of string - for { - if l.follow(terminator) { - return sb.String(), nil - } - - next := l.peek() - if next == eof { - break - } - sb.WriteRune(l.next()) - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexLiteralString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := "'" - discardLeadingNewLine := false - if l.follow("''") { - l.skip() - l.skip() - terminator = "'''" - discardLeadingNewLine = true - } - - str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -// Lex a string and return the results as a string. -// Terminator is the substring indicating the end of the token. -// The resulting string does not include the terminator. 
-func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - for { - if l.follow(terminator) { - return sb.String(), nil - } - - if l.follow("\\") { - l.next() - switch l.peek() { - case '\r': - fallthrough - case '\n': - fallthrough - case '\t': - fallthrough - case ' ': - // skip all whitespace chars following backslash - for strings.ContainsRune("\r\n\t ", l.peek()) { - l.next() - } - case '"': - sb.WriteString("\"") - l.next() - case 'n': - sb.WriteString("\n") - l.next() - case 'b': - sb.WriteString("\b") - l.next() - case 'f': - sb.WriteString("\f") - l.next() - case '/': - sb.WriteString("/") - l.next() - case 't': - sb.WriteString("\t") - l.next() - case 'r': - sb.WriteString("\r") - l.next() - case '\\': - sb.WriteString("\\") - l.next() - case 'u': - l.next() - var code strings.Builder - for i := 0; i < 4; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 32) - if err != nil { - return "", errors.New("invalid unicode escape: \\u" + code.String()) - } - sb.WriteRune(rune(intcode)) - case 'U': - l.next() - var code strings.Builder - for i := 0; i < 8; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 64) - if err != nil { - return "", errors.New("invalid unicode escape: \\U" + code.String()) - } - sb.WriteRune(rune(intcode)) - default: - return "", errors.New("invalid escape sequence: \\" + string(l.peek())) - } - } else { - r := l.peek() - - if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { - return "", fmt.Errorf("unescaped control character %U", r) - } - l.next() - sb.WriteRune(r) - } - - if l.peek() == eof { - break - } - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := `"` - discardLeadingNewLine := false - acceptNewLines := false - if l.follow(`""`) { - l.skip() - l.skip() - terminator = `"""` - discardLeadingNewLine = true - acceptNewLines = true - } - - str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -func (l *tomlLexer) lexTableKey() tomlLexStateFn { - l.next() - - if l.peek() == '[' { - // token '[[' signifies an array of tables - l.next() - l.emit(tokenDoubleLeftBracket) - return l.lexInsideTableArrayKey - } - // vanilla table key - l.emit(tokenLeftBracket) - return l.lexInsideTableKey -} - -// Parse the key till "]]", but only bare keys are supported -func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroupArray) - } - l.next() - if l.peek() != ']' { - break - } - l.next() - l.emit(tokenDoubleRightBracket) - return l.lexVoid - case '[': - return l.errorf("table array key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table array key") -} - -// Parse the 
key till "]" but only bare keys are supported -func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroup) - } - l.next() - l.emit(tokenRightBracket) - return l.lexVoid - case '[': - return l.errorf("table key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table key") -} - -func (l *tomlLexer) lexRightBracket() tomlLexStateFn { - l.next() - l.emit(tokenRightBracket) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { - return l.errorf("cannot have ']' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -type validRuneFn func(r rune) bool - -func isValidHexRune(r rune) bool { - return r >= 'a' && r <= 'f' || - r >= 'A' && r <= 'F' || - r >= '0' && r <= '9' || - r == '_' -} - -func isValidOctalRune(r rune) bool { - return r >= '0' && r <= '7' || r == '_' -} - -func isValidBinaryRune(r rune) bool { - return r == '0' || r == '1' || r == '_' -} - -func (l *tomlLexer) lexNumber() tomlLexStateFn { - r := l.peek() - - if r == '0' { - follow := l.peekString(2) - if len(follow) == 2 { - var isValidRune validRuneFn - switch follow[1] { - case 'x': - isValidRune = isValidHexRune - case 'o': - isValidRune = isValidOctalRune - case 'b': - isValidRune = isValidBinaryRune - default: - if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { - return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) - } - } - - if isValidRune != nil { - l.next() - l.next() - digitSeen := false - for { - next := l.peek() - if !isValidRune(next) { - break - } - digitSeen = true - l.next() - } - - if !digitSeen { - return l.errorf("number needs at least one digit") - } - - l.emit(tokenInteger) - - return l.lexRvalue - } - } - } - - if r == '+' || r == '-' { - l.next() - if l.follow("inf") { - return l.lexInf - } - if l.follow("nan") { - return l.lexNan - } - } - - pointSeen := false - expSeen := false - digitSeen := false - for { - next := l.peek() - if next == '.' { - if pointSeen { - return l.errorf("cannot have two dots in one float") - } - l.next() - if !isDigit(l.peek()) { - return l.errorf("float cannot end with a dot") - } - pointSeen = true - } else if next == 'e' || next == 'E' { - expSeen = true - l.next() - r := l.peek() - if r == '+' || r == '-' { - l.next() - } - } else if isDigit(next) { - digitSeen = true - l.next() - } else if next == '_' { - l.next() - } else { - break - } - if pointSeen && !digitSeen { - return l.errorf("cannot start float with a dot") - } - } - - if !digitSeen { - return l.errorf("no digit in that number") - } - if pointSeen || expSeen { - l.emit(tokenFloat) - } else { - l.emit(tokenInteger) - } - return l.lexRvalue -} - -func (l *tomlLexer) run() { - for state := l.lexVoid; state != nil; { - state = state() - } -} - -// Entry point -func lexToml(inputBytes []byte) []token { - runes := bytes.Runes(inputBytes) - l := &tomlLexer{ - input: runes, - tokens: make([]token, 0, 256), - line: 1, - col: 1, - endbufferLine: 1, - endbufferCol: 1, - } - l.run() - return l.tokens -} diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go deleted file mode 100644 index 9dfe4b9e6c..0000000000 --- a/vendor/github.com/pelletier/go-toml/localtime.go +++ /dev/null @@ -1,287 +0,0 @@ -// Implementation of TOML's local date/time. 
-// -// Copied over from Google's civil to avoid pulling all the Google dependencies. -// Originals: -// https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go -// Changes: -// * Renamed files from civil* to localtime*. -// * Package changed from civil to toml. -// * 'Local' prefix added to all structs. -// -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package civil implements types for civil time, a time-zone-independent -// representation of time that follows the rules of the proleptic -// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second -// minutes. -// -// Because they lack location information, these types do not represent unique -// moments or intervals of time. Use time.Time for that purpose. -package toml - -import ( - "fmt" - "time" -) - -// A LocalDate represents a date (year, month, day). -// -// This type does not include location information, and therefore does not -// describe a unique 24-hour timespan. -type LocalDate struct { - Year int // Year (e.g., 2014). - Month time.Month // Month of the year (January = 1, ...). - Day int // Day of the month, starting at 1. -} - -// LocalDateOf returns the LocalDate in which a time occurs in that time's location. -func LocalDateOf(t time.Time) LocalDate { - var d LocalDate - d.Year, d.Month, d.Day = t.Date() - return d -} - -// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. -func ParseLocalDate(s string) (LocalDate, error) { - t, err := time.Parse("2006-01-02", s) - if err != nil { - return LocalDate{}, err - } - return LocalDateOf(t), nil -} - -// String returns the date in RFC3339 full-date format. -func (d LocalDate) String() string { - return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) -} - -// IsValid reports whether the date is valid. -func (d LocalDate) IsValid() bool { - return LocalDateOf(d.In(time.UTC)) == d -} - -// In returns the time corresponding to time 00:00:00 of the date in the location. -// -// In is always consistent with time.LocalDate, even when time.LocalDate returns a time -// on a different day. For example, if loc is America/Indiana/Vincennes, then both -// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc) -// and -// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) -// return 23:00:00 on April 30, 1955. -// -// In panics if loc is nil. -func (d LocalDate) In(loc *time.Location) time.Time { - return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) -} - -// AddDays returns the date that is n days in the future. -// n can also be negative to go into the past. -func (d LocalDate) AddDays(n int) LocalDate { - return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) -} - -// DaysSince returns the signed number of days between the date and s, not including the end day. -// This is the inverse operation to AddDays. 
-func (d LocalDate) DaysSince(s LocalDate) (days int) { - // We convert to Unix time so we do not have to worry about leap seconds: - // Unix time increases by exactly 86400 seconds per day. - deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() - return int(deltaUnix / 86400) -} - -// Before reports whether d1 occurs before d2. -func (d1 LocalDate) Before(d2 LocalDate) bool { - if d1.Year != d2.Year { - return d1.Year < d2.Year - } - if d1.Month != d2.Month { - return d1.Month < d2.Month - } - return d1.Day < d2.Day -} - -// After reports whether d1 occurs after d2. -func (d1 LocalDate) After(d2 LocalDate) bool { - return d2.Before(d1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of d.String(). -func (d LocalDate) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The date is expected to be a string in a format accepted by ParseLocalDate. -func (d *LocalDate) UnmarshalText(data []byte) error { - var err error - *d, err = ParseLocalDate(string(data)) - return err -} - -// A LocalTime represents a time with nanosecond precision. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -// -// This type exists to represent the TIME type in storage-based APIs like BigQuery. -// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. -type LocalTime struct { - Hour int // The hour of the day in 24-hour format; range [0-23] - Minute int // The minute of the hour; range [0-59] - Second int // The second of the minute; range [0-59] - Nanosecond int // The nanosecond of the second; range [0-999999999] -} - -// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs -// in that time's location. It ignores the date. -func LocalTimeOf(t time.Time) LocalTime { - var tm LocalTime - tm.Hour, tm.Minute, tm.Second = t.Clock() - tm.Nanosecond = t.Nanosecond() - return tm -} - -// ParseLocalTime parses a string and returns the time value it represents. -// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After -// the HH:MM:SS part of the string, an optional fractional part may appear, -// consisting of a decimal point followed by one to nine decimal digits. -// (RFC3339 admits only one digit after the decimal point). -func ParseLocalTime(s string) (LocalTime, error) { - t, err := time.Parse("15:04:05.999999999", s) - if err != nil { - return LocalTime{}, err - } - return LocalTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalTime. If Nanoseconds -// is zero, no fractional part will be generated. Otherwise, the result will -// end with a fractional part consisting of a decimal point and nine digits. -func (t LocalTime) String() string { - s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) - if t.Nanosecond == 0 { - return s - } - return s + fmt.Sprintf(".%09d", t.Nanosecond) -} - -// IsValid reports whether the time is valid. -func (t LocalTime) IsValid() bool { - // Construct a non-zero time. - tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) - return LocalTimeOf(tm) == t -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of t.String(). 
-func (t LocalTime) MarshalText() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The time is expected to be a string in a format accepted by ParseLocalTime. -func (t *LocalTime) UnmarshalText(data []byte) error { - var err error - *t, err = ParseLocalTime(string(data)) - return err -} - -// A LocalDateTime represents a date and time. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -type LocalDateTime struct { - Date LocalDate - Time LocalTime -} - -// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub. - -// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location. -func LocalDateTimeOf(t time.Time) LocalDateTime { - return LocalDateTime{ - Date: LocalDateOf(t), - Time: LocalTimeOf(t), - } -} - -// ParseLocalDateTime parses a string and returns the LocalDateTime it represents. -// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits -// the time offset but includes an optional fractional time, as described in -// ParseLocalTime. Informally, the accepted format is -// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] -// where the 'T' may be a lower-case 't'. -func ParseLocalDateTime(s string) (LocalDateTime, error) { - t, err := time.Parse("2006-01-02T15:04:05.999999999", s) - if err != nil { - t, err = time.Parse("2006-01-02t15:04:05.999999999", s) - if err != nil { - return LocalDateTime{}, err - } - } - return LocalDateTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalDate. -func (dt LocalDateTime) String() string { - return dt.Date.String() + "T" + dt.Time.String() -} - -// IsValid reports whether the datetime is valid. -func (dt LocalDateTime) IsValid() bool { - return dt.Date.IsValid() && dt.Time.IsValid() -} - -// In returns the time corresponding to the LocalDateTime in the given location. -// -// If the time is missing or ambigous at the location, In returns the same -// result as time.LocalDate. For example, if loc is America/Indiana/Vincennes, then -// both -// time.LocalDate(1955, time.May, 1, 0, 30, 0, 0, loc) -// and -// civil.LocalDateTime{ -// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}}, -// civil.LocalTime{Minute: 30}}.In(loc) -// return 23:30:00 on April 30, 1955. -// -// In panics if loc is nil. -func (dt LocalDateTime) In(loc *time.Location) time.Time { - return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) -} - -// Before reports whether dt1 occurs before dt2. -func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool { - return dt1.In(time.UTC).Before(dt2.In(time.UTC)) -} - -// After reports whether dt1 occurs after dt2. -func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { - return dt2.Before(dt1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of dt.String(). -func (dt LocalDateTime) MarshalText() ([]byte, error) { - return []byte(dt.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. 
-// The datetime is expected to be a string in a format accepted by ParseLocalDateTime -func (dt *LocalDateTime) UnmarshalText(data []byte) error { - var err error - *dt, err = ParseLocalDateTime(string(data)) - return err -} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go deleted file mode 100644 index 5712730498..0000000000 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ /dev/null @@ -1,1308 +0,0 @@ -package toml - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -const ( - tagFieldName = "toml" - tagFieldComment = "comment" - tagCommented = "commented" - tagMultiline = "multiline" - tagLiteral = "literal" - tagDefault = "default" -) - -type tomlOpts struct { - name string - nameFromTag bool - comment string - commented bool - multiline bool - literal bool - include bool - omitempty bool - defaultValue string -} - -type encOpts struct { - quoteMapKeys bool - arraysOneElementPerLine bool -} - -var encOptsDefaults = encOpts{ - quoteMapKeys: false, -} - -type annotation struct { - tag string - comment string - commented string - multiline string - literal string - defaultValue string -} - -var annotationDefault = annotation{ - tag: tagFieldName, - comment: tagFieldComment, - commented: tagCommented, - multiline: tagMultiline, - literal: tagLiteral, - defaultValue: tagDefault, -} - -type MarshalOrder int - -// Orders the Encoder can write the fields to the output stream. -const ( - // Sort fields alphabetically. - OrderAlphabetical MarshalOrder = iota + 1 - // Preserve the order the fields are encountered. For example, the order of fields in - // a struct. - OrderPreserve -) - -var timeType = reflect.TypeOf(time.Time{}) -var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() -var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() -var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() -var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() -var localDateType = reflect.TypeOf(LocalDate{}) -var localTimeType = reflect.TypeOf(LocalTime{}) -var localDateTimeType = reflect.TypeOf(LocalDateTime{}) -var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) - -// Check if the given marshal type maps to a Tree primitive -func isPrimitive(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isPrimitive(mtype.Elem()) - case reflect.Bool: - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Struct: - return isTimeType(mtype) - default: - return false - } -} - -func isTimeType(mtype reflect.Type) bool { - return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType -} - -// Check if the given marshal type maps to a Tree slice or array -func isTreeSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTreeSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTree(mtype.Elem()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a custom marshaler type -func isCustomMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - 
return isCustomMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a text marshaler type -func isTextMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTextMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a non-Tree slice or array -func isOtherSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isOtherSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return !isTreeSequence(mtype) - default: - return false - } -} - -// Check if the given marshal type maps to a Tree -func isTree(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTree(mtype.Elem()) - case reflect.Map: - return true - case reflect.Struct: - return !isPrimitive(mtype) - default: - return false - } -} - -func isCustomMarshaler(mtype reflect.Type) bool { - return mtype.Implements(marshalerType) -} - -func callCustomMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(Marshaler).MarshalTOML() -} - -func isTextMarshaler(mtype reflect.Type) bool { - return mtype.Implements(textMarshalerType) && !isTimeType(mtype) -} - -func callTextMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(encoding.TextMarshaler).MarshalText() -} - -func isCustomUnmarshaler(mtype reflect.Type) bool { - return mtype.Implements(unmarshalerType) -} - -func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { - return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) -} - -func isTextUnmarshaler(mtype reflect.Type) bool { - return mtype.Implements(textUnmarshalerType) -} - -func callTextUnmarshaler(mval reflect.Value, text []byte) error { - return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) -} - -// Marshaler is the interface implemented by types that -// can marshal themselves into valid TOML. -type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -// Unmarshaler is the interface implemented by types that -// can unmarshal a TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -/* -Marshal returns the TOML encoding of v. Behavior is similar to the Go json -encoder, except that there is no concept of a Marshaler interface or MarshalTOML -function for sub-structs, and currently only definite types can be marshaled -(i.e. no `interface{}`). - -The following struct annotations are supported: - - toml:"Field" Overrides the field's name to output. - omitempty When set, empty values and groups are not emitted. - comment:"comment" Emits a # comment on the same line. This supports new lines. - commented:"true" Emits the value as commented. - -Note that pointers are automatically assigned the "omitempty" option, as TOML -explicitly does not handle null values (saying instead the label should be -dropped). 
- -Tree structural types and corresponding marshal types: - - *Tree (*)struct, (*)map[string]interface{} - []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} - []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) - interface{} (*)primitive - -Tree primitive types and corresponding marshal types: - - uint64 uint, uint8-uint64, pointers to same - int64 int, int8-uint64, pointers to same - float64 float32, float64, pointers to same - string string, pointers to same - bool bool, pointers to same - time.LocalTime time.LocalTime{}, pointers to same - -For additional flexibility, use the Encoder API. -*/ -func Marshal(v interface{}) ([]byte, error) { - return NewEncoder(nil).marshal(v) -} - -// Encoder writes TOML values to an output stream. -type Encoder struct { - w io.Writer - encOpts - annotation - line int - col int - order MarshalOrder - promoteAnon bool - compactComments bool - indentation string -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - encOpts: encOptsDefaults, - annotation: annotationDefault, - line: 0, - col: 1, - order: OrderAlphabetical, - indentation: " ", - } -} - -// Encode writes the TOML encoding of v to the stream. -// -// See the documentation for Marshal for details. -func (e *Encoder) Encode(v interface{}) error { - b, err := e.marshal(v) - if err != nil { - return err - } - if _, err := e.w.Write(b); err != nil { - return err - } - return nil -} - -// QuoteMapKeys sets up the encoder to encode -// maps with string type keys with quoted TOML keys. -// -// This relieves the character limitations on map keys. -func (e *Encoder) QuoteMapKeys(v bool) *Encoder { - e.quoteMapKeys = v - return e -} - -// ArraysWithOneElementPerLine sets up the encoder to encode arrays -// with more than one element on multiple lines instead of one. -// -// For example: -// -// A = [1,2,3] -// -// Becomes -// -// A = [ -// 1, -// 2, -// 3, -// ] -func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { - e.arraysOneElementPerLine = v - return e -} - -// Order allows to change in which order fields will be written to the output stream. -func (e *Encoder) Order(ord MarshalOrder) *Encoder { - e.order = ord - return e -} - -// Indentation allows to change indentation when marshalling. -func (e *Encoder) Indentation(indent string) *Encoder { - e.indentation = indent - return e -} - -// SetTagName allows changing default tag "toml" -func (e *Encoder) SetTagName(v string) *Encoder { - e.tag = v - return e -} - -// SetTagComment allows changing default tag "comment" -func (e *Encoder) SetTagComment(v string) *Encoder { - e.comment = v - return e -} - -// SetTagCommented allows changing default tag "commented" -func (e *Encoder) SetTagCommented(v string) *Encoder { - e.commented = v - return e -} - -// SetTagMultiline allows changing default tag "multiline" -func (e *Encoder) SetTagMultiline(v string) *Encoder { - e.multiline = v - return e -} - -// PromoteAnonymous allows to change how anonymous struct fields are marshaled. -// Usually, they are marshaled as if the inner exported fields were fields in -// the outer struct. However, if an anonymous struct field is given a name in -// its TOML tag, it is treated like a regular struct field with that name. -// rather than being anonymous. -// -// In case anonymous promotion is enabled, all anonymous structs are promoted -// and treated like regular struct fields. 
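The struct annotations and Encoder options documented above are the public encoding surface of the removed go-toml v1 package. A small usage sketch, with an illustrative config struct (field names and values are assumptions, not taken from ko):

```go
package main

import (
	"bytes"
	"fmt"

	toml "github.com/pelletier/go-toml"
)

type Config struct {
	Name  string `toml:"name" comment:"service name"`
	Ports []int  `toml:"ports"`
	Debug bool   `toml:"debug,omitempty"` // zero value, so omitted from output
}

func main() {
	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf).
		Order(toml.OrderPreserve).        // keep struct field order instead of sorting
		ArraysWithOneElementPerLine(true) // render each array element on its own line
	if err := enc.Encode(Config{Name: "ko", Ports: []int{8080, 9090}}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}
```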
-func (e *Encoder) PromoteAnonymous(promote bool) *Encoder { - e.promoteAnon = promote - return e -} - -// CompactComments removes the new line before each comment in the tree. -func (e *Encoder) CompactComments(cc bool) *Encoder { - e.compactComments = cc - return e -} - -func (e *Encoder) marshal(v interface{}) ([]byte, error) { - // Check if indentation is valid - for _, char := range e.indentation { - if !isSpace(char) { - return []byte{}, fmt.Errorf("invalid indentation: must only contains space or tab characters") - } - } - - mtype := reflect.TypeOf(v) - if mtype == nil { - return []byte{}, errors.New("nil cannot be marshaled to TOML") - } - - switch mtype.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Ptr: - if mtype.Elem().Kind() != reflect.Struct { - return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") - } - if reflect.ValueOf(v).IsNil() { - return []byte{}, errors.New("nil pointer cannot be marshaled to TOML") - } - default: - return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") - } - - sval := reflect.ValueOf(v) - if isCustomMarshaler(mtype) { - return callCustomMarshaler(sval) - } - if isTextMarshaler(mtype) { - return callTextMarshaler(sval) - } - t, err := e.valueToTree(mtype, sval) - if err != nil { - return []byte{}, err - } - - var buf bytes.Buffer - _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, e.compactComments, false) - - return buf.Bytes(), err -} - -// Create next tree with a position based on Encoder.line -func (e *Encoder) nextTree() *Tree { - return newTreeWithPosition(Position{Line: e.line, Col: 1}) -} - -// Convert given marshal struct or map value to toml tree -func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { - if mtype.Kind() == reflect.Ptr { - return e.valueToTree(mtype.Elem(), mval.Elem()) - } - tval := e.nextTree() - switch mtype.Kind() { - case reflect.Struct: - switch mval.Interface().(type) { - case Tree: - reflect.ValueOf(tval).Elem().Set(mval) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef, mvalf := mtype.Field(i), mval.Field(i) - opts := tomlOptions(mtypef, e.annotation) - if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) { - val, err := e.valueToToml(mtypef.Type, mvalf) - if err != nil { - return nil, err - } - if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon { - e.appendTree(tval, tree) - } else { - val = e.wrapTomlValue(val, tval) - tval.SetPathWithOptions([]string{opts.name}, SetOptions{ - Comment: opts.comment, - Commented: opts.commented, - Multiline: opts.multiline, - Literal: opts.literal, - }, val) - } - } - } - } - case reflect.Map: - keys := mval.MapKeys() - if e.order == OrderPreserve && len(keys) > 0 { - // Sorting []reflect.Value is not straight forward. - // - // OrderPreserve will support deterministic results when string is used - // as the key to maps. 
- typ := keys[0].Type() - kind := keys[0].Kind() - if kind == reflect.String { - ikeys := make([]string, len(keys)) - for i := range keys { - ikeys[i] = keys[i].Interface().(string) - } - sort.Strings(ikeys) - for i := range ikeys { - keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) - } - } - } - for _, key := range keys { - mvalf := mval.MapIndex(key) - if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { - continue - } - val, err := e.valueToToml(mtype.Elem(), mvalf) - if err != nil { - return nil, err - } - val = e.wrapTomlValue(val, tval) - if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) - if err != nil { - return nil, err - } - tval.SetPath([]string{keyStr}, val) - } else { - tval.SetPath([]string{key.String()}, val) - } - } - } - return tval, nil -} - -// Convert given marshal slice to slice of Toml trees -func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { - tval := make([]*Tree, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal slice to slice of toml values -func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - tval := make([]interface{}, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal value to toml value -func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - if mtype.Kind() == reflect.Ptr { - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - default: - return e.valueToToml(mtype.Elem(), mval.Elem()) - } - } - if mtype.Kind() == reflect.Interface { - return e.valueToToml(mval.Elem().Type(), mval.Elem()) - } - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - case isTree(mtype): - return e.valueToTree(mtype, mval) - case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): - return e.valueToOtherSlice(mtype, mval) - case isTreeSequence(mtype): - return e.valueToTreeSlice(mtype, mval) - default: - switch mtype.Kind() { - case reflect.Bool: - return mval.Bool(), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { - return fmt.Sprint(mval), nil - } - return mval.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return mval.Uint(), nil - case reflect.Float32, reflect.Float64: - return mval.Float(), nil - case reflect.String: - return mval.String(), nil - case reflect.Struct: - return mval.Interface(), nil - default: - return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) - } - } -} - -func (e *Encoder) appendTree(t, o *Tree) error { - for key, value := range o.values { - if _, ok := t.values[key]; ok { - continue - } - if tomlValue, ok := value.(*tomlValue); ok { - tomlValue.position.Col = t.position.Col - } - t.values[key] = value - } 
- return nil -} - -// Create a toml value with the current line number as the position line -func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} { - _, isTree := val.(*Tree) - _, isTreeS := val.([]*Tree) - if isTree || isTreeS { - e.line++ - return val - } - - ret := &tomlValue{ - value: val, - position: Position{ - e.line, - parent.position.Col, - }, - } - e.line++ - return ret -} - -// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. -// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for -// sub-structs, and only definite types can be unmarshaled. -func (t *Tree) Unmarshal(v interface{}) error { - d := Decoder{tval: t, tagName: tagFieldName} - return d.unmarshal(v) -} - -// Marshal returns the TOML encoding of Tree. -// See Marshal() documentation for types mapping table. -func (t *Tree) Marshal() ([]byte, error) { - var buf bytes.Buffer - _, err := t.WriteTo(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// Unmarshal parses the TOML-encoded data and stores the result in the value -// pointed to by v. Behavior is similar to the Go json encoder, except that there -// is no concept of an Unmarshaler interface or UnmarshalTOML function for -// sub-structs, and currently only definite types can be unmarshaled to (i.e. no -// `interface{}`). -// -// The following struct annotations are supported: -// -// toml:"Field" Overrides the field's name to map to. -// default:"foo" Provides a default value. -// -// For default values, only fields of the following types are supported: -// * string -// * bool -// * int -// * int64 -// * float64 -// -// See Marshal() documentation for types mapping table. -func Unmarshal(data []byte, v interface{}) error { - t, err := LoadReader(bytes.NewReader(data)) - if err != nil { - return err - } - return t.Unmarshal(v) -} - -// Decoder reads and decodes TOML values from an input stream. -type Decoder struct { - r io.Reader - tval *Tree - encOpts - tagName string - strict bool - visitor visitorState -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - r: r, - encOpts: encOptsDefaults, - tagName: tagFieldName, - } -} - -// Decode reads a TOML-encoded value from it's input -// and unmarshals it in the value pointed at by v. -// -// See the documentation for Marshal for details. -func (d *Decoder) Decode(v interface{}) error { - var err error - d.tval, err = LoadReader(d.r) - if err != nil { - return err - } - return d.unmarshal(v) -} - -// SetTagName allows changing default tag "toml" -func (d *Decoder) SetTagName(v string) *Decoder { - d.tagName = v - return d -} - -// Strict allows changing to strict decoding. Any fields that are found in the -// input data and do not have a corresponding struct member cause an error. 
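Strict decoding, described just above, turns input keys with no matching struct field into an error rather than silently dropping them. A sketch of the removed Decoder API (the struct and document are illustrative):

```go
package main

import (
	"fmt"
	"strings"

	toml "github.com/pelletier/go-toml"
)

type Server struct {
	Host string `toml:"host"`
	Port int    `toml:"port"`
}

func main() {
	doc := "host = \"localhost\"\nport = 8080\nextra = true\n"
	var s Server
	// "extra" has no corresponding field, so Strict(true) surfaces it.
	err := toml.NewDecoder(strings.NewReader(doc)).Strict(true).Decode(&s)
	fmt.Println(err) // undecoded keys: ["extra"]
}
```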
-func (d *Decoder) Strict(strict bool) *Decoder { - d.strict = strict - return d -} - -func (d *Decoder) unmarshal(v interface{}) error { - mtype := reflect.TypeOf(v) - if mtype == nil { - return errors.New("nil cannot be unmarshaled from TOML") - } - if mtype.Kind() != reflect.Ptr { - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - elem := mtype.Elem() - - switch elem.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Interface: - elem = mapStringInterfaceType - default: - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - if reflect.ValueOf(v).IsNil() { - return errors.New("nil pointer cannot be unmarshaled from TOML") - } - - vv := reflect.ValueOf(v).Elem() - - if d.strict { - d.visitor = newVisitorState(d.tval) - } - - sval, err := d.valueFromTree(elem, d.tval, &vv) - if err != nil { - return err - } - if err := d.visitor.validate(); err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(sval) - return nil -} - -// Convert toml tree to marshal struct or map, using marshal type. When mval1 -// is non-nil, merge fields into the given value instead of allocating a new one. -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - // Check if pointer to value implements the Unmarshaler interface. - if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { - d.visitor.visitAll() - - if tval == nil { - return mvalPtr.Elem(), nil - } - - if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - var mval reflect.Value - switch mtype.Kind() { - case reflect.Struct: - if mval1 != nil { - mval = *mval1 - } else { - mval = reflect.New(mtype).Elem() - } - - switch mval.Interface().(type) { - case Tree: - mval.Set(reflect.ValueOf(tval).Elem()) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef := mtype.Field(i) - an := annotation{tag: d.tagName} - opts := tomlOptions(mtypef, an) - if !opts.include { - continue - } - baseKey := opts.name - keysToTry := []string{ - baseKey, - strings.ToLower(baseKey), - strings.ToTitle(baseKey), - strings.ToLower(string(baseKey[0])) + baseKey[1:], - } - - found := false - if tval != nil { - for _, key := range keysToTry { - exists := tval.HasPath([]string{key}) - if !exists { - continue - } - - d.visitor.push(key) - val := tval.GetPath([]string{key}) - fval := mval.Field(i) - mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.Field(i).Set(mvalf) - found = true - d.visitor.pop() - break - } - } - - if !found && opts.defaultValue != "" { - mvalf := mval.Field(i) - var val interface{} - var err error - switch mvalf.Kind() { - case reflect.String: - val = opts.defaultValue - case reflect.Bool: - val, err = strconv.ParseBool(opts.defaultValue) - case reflect.Uint: - val, err = strconv.ParseUint(opts.defaultValue, 10, 0) - case reflect.Uint8: - val, err = strconv.ParseUint(opts.defaultValue, 10, 8) - case reflect.Uint16: - val, err = strconv.ParseUint(opts.defaultValue, 10, 16) - case reflect.Uint32: - val, err = strconv.ParseUint(opts.defaultValue, 10, 32) - case reflect.Uint64: - val, err = strconv.ParseUint(opts.defaultValue, 10, 64) - case reflect.Int: - val, err = 
strconv.ParseInt(opts.defaultValue, 10, 0) - case reflect.Int8: - val, err = strconv.ParseInt(opts.defaultValue, 10, 8) - case reflect.Int16: - val, err = strconv.ParseInt(opts.defaultValue, 10, 16) - case reflect.Int32: - val, err = strconv.ParseInt(opts.defaultValue, 10, 32) - case reflect.Int64: - // Check if the provided number has a non-numeric extension. - var hasExtension bool - if len(opts.defaultValue) > 0 { - lastChar := opts.defaultValue[len(opts.defaultValue)-1] - if lastChar < '0' || lastChar > '9' { - hasExtension = true - } - } - // If the value is a time.Duration with extension, parse as duration. - // If the value is an int64 or a time.Duration without extension, parse as number. - if hasExtension && mvalf.Type().String() == "time.Duration" { - val, err = time.ParseDuration(opts.defaultValue) - } else { - val, err = strconv.ParseInt(opts.defaultValue, 10, 64) - } - case reflect.Float32: - val, err = strconv.ParseFloat(opts.defaultValue, 32) - case reflect.Float64: - val, err = strconv.ParseFloat(opts.defaultValue, 64) - default: - return mvalf, fmt.Errorf("unsupported field type for default option") - } - - if err != nil { - return mvalf, err - } - mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) - } - - // save the old behavior above and try to check structs - if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { - tmpTval := tval - if !mtypef.Anonymous { - tmpTval = nil - } - fval := mval.Field(i) - v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) - if err != nil { - return v, err - } - mval.Field(i).Set(v) - } - } - } - case reflect.Map: - mval = reflect.MakeMap(mtype) - for _, key := range tval.Keys() { - d.visitor.push(key) - // TODO: path splits key - val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) - d.visitor.pop() - } - } - return mval, nil -} - -// Convert toml value to marshal struct/map slice, using marshal type -func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - d.visitor.push(strconv.Itoa(i)) - val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - d.visitor.pop() - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val := reflect.ValueOf(tval) - length := val.Len() - - mval, err := makeSliceOrArray(mtype, length) - if err != nil { - return mval, err - } - - for i := 0; i < length; i++ { - val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Create a new slice or a new array 
with specified length -func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { - var mval reflect.Value - switch mtype.Kind() { - case reflect.Slice: - mval = reflect.MakeSlice(mtype, tLength, tLength) - case reflect.Array: - mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() - if tLength > mtype.Len() { - return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) - } - } - return mval, nil -} - -// Convert toml value to marshal value, using marshal type. When mval1 is non-nil -// and the given type is a struct value, merge fields into it. -func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - switch t := tval.(type) { - case *Tree: - var mval11 *reflect.Value - if mtype.Kind() == reflect.Struct { - mval11 = mval1 - } - - if isTree(mtype) { - return d.valueFromTree(mtype, t, mval11) - } - - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) - } else { - return d.valueFromToml(mval1.Elem().Type(), t, nil) - } - } - - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) - case []*Tree: - if isTreeSequence(mtype) { - return d.valueFromTreeSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) - case []interface{}: - d.visitor.visit() - if isOtherSequence(mtype) { - return d.valueFromOtherSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) - default: - d.visitor.visit() - mvalPtr := reflect.New(mtype) - - // Check if pointer to value implements the Unmarshaler interface. - if isCustomUnmarshaler(mvalPtr.Type()) { - if err := callCustomUnmarshaler(mvalPtr, tval); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - // Check if pointer to value implements the encoding.TextUnmarshaler. 
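The default-tag machinery above fills in struct fields that are absent from the input, and the int64 branch special-cases time.Duration: a default value with a non-numeric suffix is parsed by time.ParseDuration rather than ParseInt. An illustrative sketch (struct and values are assumptions):

```go
package main

import (
	"fmt"
	"time"

	toml "github.com/pelletier/go-toml"
)

type Opts struct {
	Port    int           `toml:"port" default:"8080"`
	Timeout time.Duration `toml:"timeout" default:"90s"` // "s" suffix => time.ParseDuration
}

func main() {
	var o Opts
	// Only "port" appears in the document; "timeout" falls back to its default tag.
	if err := toml.Unmarshal([]byte("port = 9090\n"), &o); err != nil {
		panic(err)
	}
	fmt.Println(o.Port, o.Timeout) // 9090 1m30s
}
```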
- if isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { - if err := d.unmarshalText(tval, mvalPtr); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) - } - return mvalPtr.Elem(), nil - } - - switch mtype.Kind() { - case reflect.Bool, reflect.Struct: - val := reflect.ValueOf(tval) - - switch val.Type() { - case localDateType: - localDate := val.Interface().(LocalDate) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil - } - case localDateTimeType: - localDateTime := val.Interface().(LocalDateTime) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date( - localDateTime.Date.Year, - localDateTime.Date.Month, - localDateTime.Date.Day, - localDateTime.Time.Hour, - localDateTime.Time.Minute, - localDateTime.Time.Second, - localDateTime.Time.Nanosecond, - time.Local)), nil - } - } - - // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.String: - val := reflect.ValueOf(tval) - // stupidly, int64 is convertible to string. So special case this. - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - val := reflect.ValueOf(tval) - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { - d, err := time.ParseDuration(val.String()) - if err != nil { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. 
%s", tval, tval, mtype.String(), err) - } - return reflect.ValueOf(d), nil - } - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - if val.Type().Kind() != reflect.Uint64 && val.Convert(reflect.TypeOf(int(1))).Int() < 0 { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Float32, reflect.Float64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Interface: - if mval1 == nil || mval1.IsNil() { - return reflect.ValueOf(tval), nil - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - case reflect.Slice, reflect.Array: - if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { - return d.valueFromOtherSliceI(mtype, t) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - default: - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - } - } -} - -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - var melem *reflect.Value - - if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { - elem := mval1.Elem() - melem = &elem - } - - val, err := d.valueFromToml(mtype.Elem(), tval, melem) - if err != nil { - return reflect.ValueOf(nil), err - } - mval := reflect.New(mtype.Elem()) - mval.Elem().Set(val) - return mval, nil -} - -func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { - var buf bytes.Buffer - fmt.Fprint(&buf, tval) - return callTextUnmarshaler(mval, buf.Bytes()) -} - -func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { - tag := vf.Tag.Get(an.tag) - parse := strings.Split(tag, ",") - var comment string - if c := vf.Tag.Get(an.comment); c != "" { - comment = c - } - commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) - multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) - literal, _ := strconv.ParseBool(vf.Tag.Get(an.literal)) - defaultValue := vf.Tag.Get(tagDefault) - result := tomlOpts{ - name: vf.Name, - nameFromTag: false, - comment: comment, - 
commented: commented, - multiline: multiline, - literal: literal, - include: true, - omitempty: false, - defaultValue: defaultValue, - } - if parse[0] != "" { - if parse[0] == "-" && len(parse) == 1 { - result.include = false - } else { - result.name = strings.Trim(parse[0], " ") - result.nameFromTag = true - } - } - if vf.PkgPath != "" { - result.include = false - } - if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { - result.omitempty = true - } - if vf.Type.Kind() == reflect.Ptr { - result.omitempty = true - } - return result -} - -func isZero(val reflect.Value) bool { - switch val.Type().Kind() { - case reflect.Slice, reflect.Array, reflect.Map: - return val.Len() == 0 - default: - return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) - } -} - -func formatError(err error, pos Position) error { - if err.Error()[0] == '(' { // Error already contains position information - return err - } - return fmt.Errorf("%s: %s", pos, err) -} - -// visitorState keeps track of which keys were unmarshaled. -type visitorState struct { - tree *Tree - path []string - keys map[string]struct{} - active bool -} - -func newVisitorState(tree *Tree) visitorState { - path, result := []string{}, map[string]struct{}{} - insertKeys(path, result, tree) - return visitorState{ - tree: tree, - path: path[:0], - keys: result, - active: true, - } -} - -func (s *visitorState) push(key string) { - if s.active { - s.path = append(s.path, key) - } -} - -func (s *visitorState) pop() { - if s.active { - s.path = s.path[:len(s.path)-1] - } -} - -func (s *visitorState) visit() { - if s.active { - delete(s.keys, strings.Join(s.path, ".")) - } -} - -func (s *visitorState) visitAll() { - if s.active { - for k := range s.keys { - if strings.HasPrefix(k, strings.Join(s.path, ".")) { - delete(s.keys, k) - } - } - } -} - -func (s *visitorState) validate() error { - if !s.active { - return nil - } - undecoded := make([]string, 0, len(s.keys)) - for key := range s.keys { - undecoded = append(undecoded, key) - } - sort.Strings(undecoded) - if len(undecoded) > 0 { - return fmt.Errorf("undecoded keys: %q", undecoded) - } - return nil -} - -func insertKeys(path []string, m map[string]struct{}, tree *Tree) { - for k, v := range tree.values { - switch node := v.(type) { - case []*Tree: - for i, item := range node { - insertKeys(append(path, k, strconv.Itoa(i)), m, item) - } - case *Tree: - insertKeys(append(path, k), m, node) - case *tomlValue: - m[strings.Join(append(path, k), ".")] = struct{}{} - } - } -} diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml deleted file mode 100644 index 792b72ed72..0000000000 --- a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic_lists] - floats = [12.3,45.6,78.9] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - ints = [8001,8001,8002] - uints = [5002,5003] - strings = ["One","Two","Three"] - -[[subdocptrs]] - name = "Second" - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.second] - name = "Second" - - [subdoc.first] - name = "First" - -[basic] - uint = 5001 - bool = true - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - date = 1979-05-27T07:32:00Z - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" diff --git 
a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml deleted file mode 100644 index ba5e110bf0..0000000000 --- a/vendor/github.com/pelletier/go-toml/marshal_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic] - bool = true - date = 1979-05-27T07:32:00Z - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - uint = 5001 - -[basic_lists] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - floats = [12.3,45.6,78.9] - ints = [8001,8001,8002] - strings = ["One","Two","Three"] - uints = [5002,5003] - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.first] - name = "First" - - [subdoc.second] - name = "Second" - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" - -[[subdocptrs]] - name = "Second" diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go deleted file mode 100644 index b3726d0dd8..0000000000 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ /dev/null @@ -1,507 +0,0 @@ -// TOML Parser. - -package toml - -import ( - "errors" - "fmt" - "math" - "reflect" - "strconv" - "strings" - "time" -) - -type tomlParser struct { - flowIdx int - flow []token - tree *Tree - currentTable []string - seenTableKeys []string -} - -type tomlParserStateFn func() tomlParserStateFn - -// Formats and panics an error message based on a token -func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { - panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) -} - -func (p *tomlParser) run() { - for state := p.parseStart; state != nil; { - state = state() - } -} - -func (p *tomlParser) peek() *token { - if p.flowIdx >= len(p.flow) { - return nil - } - return &p.flow[p.flowIdx] -} - -func (p *tomlParser) assume(typ tokenType) { - tok := p.getToken() - if tok == nil { - p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) - } - if tok.typ != typ { - p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) - } -} - -func (p *tomlParser) getToken() *token { - tok := p.peek() - if tok == nil { - return nil - } - p.flowIdx++ - return tok -} - -func (p *tomlParser) parseStart() tomlParserStateFn { - tok := p.peek() - - // end of stream, parsing is finished - if tok == nil { - return nil - } - - switch tok.typ { - case tokenDoubleLeftBracket: - return p.parseGroupArray - case tokenLeftBracket: - return p.parseGroup - case tokenKey: - return p.parseAssign - case tokenEOF: - return nil - case tokenError: - p.raiseError(tok, "parsing error: %s", tok.String()) - default: - p.raiseError(tok, "unexpected token %s", tok.typ) - } - return nil -} - -func (p *tomlParser) parseGroupArray() tomlParserStateFn { - startToken := p.getToken() // discard the [[ - key := p.getToken() - if key.typ != tokenKeyGroupArray { - p.raiseError(key, "unexpected token %s, was expecting a table array key", key) - } - - // get or create table array element at the indicated part in the path - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries - destTree := p.tree.GetPath(keys) - var array []*Tree - if destTree == nil { - array = make([]*Tree, 0) - } else if target, ok := destTree.([]*Tree); ok && target != nil { - array = destTree.([]*Tree) - } else 
{ - p.raiseError(key, "key %s is already assigned and not of type table array", key) - } - p.currentTable = keys - - // add a new tree to the end of the table array - newTree := newTree() - newTree.position = startToken.Position - array = append(array, newTree) - p.tree.SetPath(p.currentTable, array) - - // remove all keys that were children of this table array - prefix := key.val + "." - found := false - for ii := 0; ii < len(p.seenTableKeys); { - tableKey := p.seenTableKeys[ii] - if strings.HasPrefix(tableKey, prefix) { - p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) - } else { - found = (tableKey == key.val) - ii++ - } - } - - // keep this key name from use by other kinds of assignments - if !found { - p.seenTableKeys = append(p.seenTableKeys, key.val) - } - - // move to next parser state - p.assume(tokenDoubleRightBracket) - return p.parseStart -} - -func (p *tomlParser) parseGroup() tomlParserStateFn { - startToken := p.getToken() // discard the [ - key := p.getToken() - if key.typ != tokenKeyGroup { - p.raiseError(key, "unexpected token %s, was expecting a table key", key) - } - for _, item := range p.seenTableKeys { - if item == key.val { - p.raiseError(key, "duplicated tables") - } - } - - p.seenTableKeys = append(p.seenTableKeys, key.val) - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - if err := p.tree.createSubTree(keys, startToken.Position); err != nil { - p.raiseError(key, "%s", err) - } - destTree := p.tree.GetPath(keys) - if target, ok := destTree.(*Tree); ok && target != nil && target.inline { - p.raiseError(key, "could not re-define exist inline table or its sub-table : %s", - strings.Join(keys, ".")) - } - p.assume(tokenRightBracket) - p.currentTable = keys - return p.parseStart -} - -func (p *tomlParser) parseAssign() tomlParserStateFn { - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err.Error()) - } - - value := p.parseRvalue() - var tableKey []string - if len(p.currentTable) > 0 { - tableKey = p.currentTable - } else { - tableKey = []string{} - } - - prefixKey := parsedKey[0 : len(parsedKey)-1] - tableKey = append(tableKey, prefixKey...) 
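parseGroupArray above is the code path that turns repeated [[...]] headers into a []*Tree under a single key. That internal representation is observable through the package's public API; a sketch (document contents are illustrative):

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load("[[fruit]]\nname = \"apple\"\n\n[[fruit]]\nname = \"banana\"\n")
	if err != nil {
		panic(err)
	}
	// Each [[fruit]] header appended one sub-tree to the same table array.
	for _, ft := range tree.Get("fruit").([]*toml.Tree) {
		fmt.Println(ft.Get("name")) // apple, then banana
	}
}
```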
- - // find the table to assign, looking out for arrays of tables - var targetNode *Tree - switch node := p.tree.GetPath(tableKey).(type) { - case []*Tree: - targetNode = node[len(node)-1] - case *Tree: - targetNode = node - case nil: - // create intermediate - if err := p.tree.createSubTree(tableKey, key.Position); err != nil { - p.raiseError(key, "could not create intermediate group: %s", err) - } - targetNode = p.tree.GetPath(tableKey).(*Tree) - default: - p.raiseError(key, "Unknown table type for path: %s", - strings.Join(tableKey, ".")) - } - - if targetNode.inline { - p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", - strings.Join(tableKey, ".")) - } - - // assign value to the found table - keyVal := parsedKey[len(parsedKey)-1] - localKey := []string{keyVal} - finalKey := append(tableKey, keyVal) - if targetNode.GetPath(localKey) != nil { - p.raiseError(key, "The following key was defined twice: %s", - strings.Join(finalKey, ".")) - } - var toInsert interface{} - - switch value.(type) { - case *Tree, []*Tree: - toInsert = value - default: - toInsert = &tomlValue{value: value, position: key.Position} - } - targetNode.values[keyVal] = toInsert - return p.parseStart -} - -var errInvalidUnderscore = errors.New("invalid use of _ in number") - -func numberContainsInvalidUnderscore(value string) error { - // For large numbers, you may use underscores between digits to enhance - // readability. Each underscore must be surrounded by at least one digit on - // each side. - - hasBefore := false - for idx, r := range value { - if r == '_' { - if !hasBefore || idx+1 >= len(value) { - // can't end with an underscore - return errInvalidUnderscore - } - } - hasBefore = isDigit(r) - } - return nil -} - -var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number") - -func hexNumberContainsInvalidUnderscore(value string) error { - hasBefore := false - for idx, r := range value { - if r == '_' { - if !hasBefore || idx+1 >= len(value) { - // can't end with an underscore - return errInvalidUnderscoreHex - } - } - hasBefore = isHexDigit(r) - } - return nil -} - -func cleanupNumberToken(value string) string { - cleanedVal := strings.Replace(value, "_", "", -1) - return cleanedVal -} - -func (p *tomlParser) parseRvalue() interface{} { - tok := p.getToken() - if tok == nil || tok.typ == tokenEOF { - p.raiseError(tok, "expecting a value") - } - - switch tok.typ { - case tokenString: - return tok.val - case tokenTrue: - return true - case tokenFalse: - return false - case tokenInf: - if tok.val[0] == '-' { - return math.Inf(-1) - } - return math.Inf(1) - case tokenNan: - return math.NaN() - case tokenInteger: - cleanedVal := cleanupNumberToken(tok.val) - base := 10 - s := cleanedVal - checkInvalidUnderscore := numberContainsInvalidUnderscore - if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { - switch cleanedVal[1] { - case 'x': - checkInvalidUnderscore = hexNumberContainsInvalidUnderscore - base = 16 - case 'o': - base = 8 - case 'b': - base = 2 - default: - panic("invalid base") // the lexer should catch this first - } - s = cleanedVal[2:] - } - - err := checkInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - - var val interface{} - val, err = strconv.ParseInt(s, base, 64) - if err == nil { - return val - } - - if s[0] != '-' { - if val, err = strconv.ParseUint(s, base, 64); err == nil { - return val - } - } - p.raiseError(tok, "%s", err) - case tokenFloat: - err := numberContainsInvalidUnderscore(tok.val) - if err != 
nil { - p.raiseError(tok, "%s", err) - } - cleanedVal := cleanupNumberToken(tok.val) - val, err := strconv.ParseFloat(cleanedVal, 64) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalTime: - val, err := ParseLocalTime(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalDate: - // a local date may be followed by: - // * nothing: this is a local date - // * a local time: this is a local date-time - - next := p.peek() - if next == nil || next.typ != tokenLocalTime { - val, err := ParseLocalDate(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - } - - localDate := tok - localTime := p.getToken() - - next = p.peek() - if next == nil || next.typ != tokenTimeOffset { - v := localDate.val + "T" + localTime.val - val, err := ParseLocalDateTime(v) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - } - - offset := p.getToken() - - layout := time.RFC3339Nano - v := localDate.val + "T" + localTime.val + offset.val - val, err := time.ParseInLocation(layout, v, time.UTC) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLeftBracket: - return p.parseArray() - case tokenLeftCurlyBrace: - return p.parseInlineTable() - case tokenEqual: - p.raiseError(tok, "cannot have multiple equals for the same key") - case tokenError: - p.raiseError(tok, "%s", tok) - default: - panic(fmt.Errorf("unhandled token: %v", tok)) - } - - return nil -} - -func tokenIsComma(t *token) bool { - return t != nil && t.typ == tokenComma -} - -func (p *tomlParser) parseInlineTable() *Tree { - tree := newTree() - var previous *token -Loop: - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated inline table") - } - switch follow.typ { - case tokenRightCurlyBrace: - p.getToken() - break Loop - case tokenKey, tokenInteger, tokenString: - if !tokenIsComma(previous) && previous != nil { - p.raiseError(follow, "comma expected between fields in inline table") - } - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err) - } - - value := p.parseRvalue() - tree.SetPath(parsedKey, value) - case tokenComma: - if tokenIsComma(previous) { - p.raiseError(follow, "need field between two commas in inline table") - } - p.getToken() - default: - p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) - } - previous = follow - } - if tokenIsComma(previous) { - p.raiseError(previous, "trailing comma at the end of inline table") - } - tree.inline = true - return tree -} - -func (p *tomlParser) parseArray() interface{} { - var array []interface{} - arrayType := reflect.TypeOf(newTree()) - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ == tokenRightBracket { - p.getToken() - break - } - val := p.parseRvalue() - if reflect.TypeOf(val) != arrayType { - arrayType = nil - } - array = append(array, val) - follow = p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ != tokenRightBracket && follow.typ != tokenComma { - p.raiseError(follow, "missing comma") - } - if follow.typ == tokenComma { - p.getToken() - } - } - - // if the array is a mixed-type array or its length is 0, - // don't convert it to a table array - if len(array) <= 0 { - arrayType = nil - } - // An array of Trees is actually an 
array of inline - // tables, which is a shorthand for a table array. If the - // array was not converted from []interface{} to []*Tree, - // the two notations would not be equivalent. - if arrayType == reflect.TypeOf(newTree()) { - tomlArray := make([]*Tree, len(array)) - for i, v := range array { - tomlArray[i] = v.(*Tree) - } - return tomlArray - } - return array -} - -func parseToml(flow []token) *Tree { - result := newTree() - result.position = Position{1, 1} - parser := &tomlParser{ - flowIdx: 0, - flow: flow, - tree: result, - currentTable: make([]string, 0), - seenTableKeys: make([]string, 0), - } - parser.run() - return result -} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go deleted file mode 100644 index c17bff87ba..0000000000 --- a/vendor/github.com/pelletier/go-toml/position.go +++ /dev/null @@ -1,29 +0,0 @@ -// Position support for go-toml - -package toml - -import ( - "fmt" -) - -// Position of a document element within a TOML document. -// -// Line and Col are both 1-indexed positions for the element's line number and -// column number, respectively. Values of zero or less will cause Invalid(), -// to return true. -type Position struct { - Line int // line within the document - Col int // column within the line -} - -// String representation of the position. -// Displays 1-indexed line and column numbers. -func (p Position) String() string { - return fmt.Sprintf("(%d, %d)", p.Line, p.Col) -} - -// Invalid returns whether or not the position is valid (i.e. with negative or -// null values) -func (p Position) Invalid() bool { - return p.Line <= 0 || p.Col <= 0 -} diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go deleted file mode 100644 index b437fdd3b6..0000000000 --- a/vendor/github.com/pelletier/go-toml/token.go +++ /dev/null @@ -1,136 +0,0 @@ -package toml - -import "fmt" - -// Define tokens -type tokenType int - -const ( - eof = -(iota + 1) -) - -const ( - tokenError tokenType = iota - tokenEOF - tokenComment - tokenKey - tokenString - tokenInteger - tokenTrue - tokenFalse - tokenFloat - tokenInf - tokenNan - tokenEqual - tokenLeftBracket - tokenRightBracket - tokenLeftCurlyBrace - tokenRightCurlyBrace - tokenLeftParen - tokenRightParen - tokenDoubleLeftBracket - tokenDoubleRightBracket - tokenLocalDate - tokenLocalTime - tokenTimeOffset - tokenKeyGroup - tokenKeyGroupArray - tokenComma - tokenColon - tokenDollar - tokenStar - tokenQuestion - tokenDot - tokenDotDot - tokenEOL -) - -var tokenTypeNames = []string{ - "Error", - "EOF", - "Comment", - "Key", - "String", - "Integer", - "True", - "False", - "Float", - "Inf", - "NaN", - "=", - "[", - "]", - "{", - "}", - "(", - ")", - "]]", - "[[", - "LocalDate", - "LocalTime", - "TimeOffset", - "KeyGroup", - "KeyGroupArray", - ",", - ":", - "$", - "*", - "?", - ".", - "..", - "EOL", -} - -type token struct { - Position - typ tokenType - val string -} - -func (tt tokenType) String() string { - idx := int(tt) - if idx < len(tokenTypeNames) { - return tokenTypeNames[idx] - } - return "Unknown" -} - -func (t token) String() string { - switch t.typ { - case tokenEOF: - return "EOF" - case tokenError: - return t.val - } - - return fmt.Sprintf("%q", t.val) -} - -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func isAlphanumeric(r rune) bool { - return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' -} - -func isKeyChar(r rune) bool { - // Keys 
start with the first character that isn't whitespace or [ and end - // with the last non-whitespace character before the equals sign. Keys - // cannot contain a # character." - return !(r == '\r' || r == '\n' || r == eof || r == '=') -} - -func isKeyStartChar(r rune) bool { - return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') -} - -func isDigit(r rune) bool { - return '0' <= r && r <= '9' -} - -func isHexDigit(r rune) bool { - return isDigit(r) || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go deleted file mode 100644 index 5541b941f8..0000000000 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ /dev/null @@ -1,533 +0,0 @@ -package toml - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strings" -) - -type tomlValue struct { - value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list - comment string - commented bool - multiline bool - literal bool - position Position -} - -// Tree is the result of the parsing of a TOML file. -type Tree struct { - values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree - comment string - commented bool - inline bool - position Position -} - -func newTree() *Tree { - return newTreeWithPosition(Position{}) -} - -func newTreeWithPosition(pos Position) *Tree { - return &Tree{ - values: make(map[string]interface{}), - position: pos, - } -} - -// TreeFromMap initializes a new Tree object using the given map. -func TreeFromMap(m map[string]interface{}) (*Tree, error) { - result, err := toTree(m) - if err != nil { - return nil, err - } - return result.(*Tree), nil -} - -// Position returns the position of the tree. -func (t *Tree) Position() Position { - return t.position -} - -// Has returns a boolean indicating if the given key exists. -func (t *Tree) Has(key string) bool { - if key == "" { - return false - } - return t.HasPath(strings.Split(key, ".")) -} - -// HasPath returns true if the given path of keys exists, false otherwise. -func (t *Tree) HasPath(keys []string) bool { - return t.GetPath(keys) != nil -} - -// Keys returns the keys of the toplevel tree (does not recurse). -func (t *Tree) Keys() []string { - keys := make([]string, len(t.values)) - i := 0 - for k := range t.values { - keys[i] = k - i++ - } - return keys -} - -// Get the value at key in the Tree. -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// If you need to retrieve non-bare keys, use GetPath. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) Get(key string) interface{} { - if key == "" { - return t - } - return t.GetPath(strings.Split(key, ".")) -} - -// GetPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. 
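As the Get documentation above notes, Get splits its key on dots, so keys that themselves contain dots must go through GetPath instead. A quick sketch of the removed Tree accessors (document contents are illustrative):

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load("[server]\nhost = \"localhost\"\n")
	if err != nil {
		panic(err)
	}
	fmt.Println(tree.Keys())                              // [server] (top level only, no recursion)
	fmt.Println(tree.Has("server.host"))                  // true
	fmt.Println(tree.Get("server.host"))                  // localhost (dot-separated path)
	fmt.Println(tree.GetPath([]string{"server", "host"})) // localhost (explicit path, no splitting)
}
```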
-func (t *Tree) GetPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.value - default: - return node - } -} - -// GetArray returns the value at key in the Tree. -// It returns []string, []int64, etc type if key has homogeneous lists -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArray(key string) interface{} { - if key == "" { - return t - } - return t.GetArrayPath(strings.Split(key, ".")) -} - -// GetArrayPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArrayPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - switch n := node.value.(type) { - case []interface{}: - return getArray(n) - default: - return node.value - } - default: - return node - } -} - -// if homogeneous array, then return slice type object over []interface{} -func getArray(n []interface{}) interface{} { - var s []string - var i64 []int64 - var f64 []float64 - var bl []bool - for _, value := range n { - switch v := value.(type) { - case string: - s = append(s, v) - case int64: - i64 = append(i64, v) - case float64: - f64 = append(f64, v) - case bool: - bl = append(bl, v) - default: - return n - } - } - if len(s) == len(n) { - return s - } else if len(i64) == len(n) { - return i64 - } else if len(f64) == len(n) { - return f64 - } else if len(bl) == len(n) { - return bl - } - return n -} - -// GetPosition returns the position of the given key. -func (t *Tree) GetPosition(key string) Position { - if key == "" { - return t.position - } - return t.GetPositionPath(strings.Split(key, ".")) -} - -// SetPositionPath sets the position of element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree position is set. 
-func (t *Tree) SetPositionPath(keys []string, pos Position) { - if len(keys) == 0 { - t.position = pos - return - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - subtree = node[len(node)-1] - default: - return - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - node.position = pos - return - case *Tree: - node.position = pos - return - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - node[len(node)-1].position = pos - return - } -} - -// GetPositionPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetPositionPath(keys []string) Position { - if len(keys) == 0 { - return t.position - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return Position{0, 0} - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - subtree = node[len(node)-1] - default: - return Position{0, 0} - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.position - case *Tree: - return node.position - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - return node[len(node)-1].position - default: - return Position{0, 0} - } -} - -// GetDefault works like Get but with a default value -func (t *Tree) GetDefault(key string, def interface{}) interface{} { - val := t.Get(key) - if val == nil { - return def - } - return val -} - -// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. -// The default values within the struct are valid default options. -type SetOptions struct { - Comment string - Commented bool - Multiline bool - Literal bool -} - -// SetWithOptions is the same as Set, but allows you to provide formatting -// instructions to the key, that will be used by Marshal(). -func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { - t.SetPathWithOptions(strings.Split(key, "."), opts, value) -} - -// SetPathWithOptions is the same as SetPath, but allows you to provide -// formatting instructions to the key, that will be reused by Marshal(). 
-func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { - subtree := t - for i, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) - subtree.values[intermediateKey] = node - } - subtree = node[len(node)-1] - } - } - - var toInsert interface{} - - switch v := value.(type) { - case *Tree: - v.comment = opts.Comment - v.commented = opts.Commented - toInsert = value - case []*Tree: - for i := range v { - v[i].commented = opts.Commented - } - toInsert = value - case *tomlValue: - v.comment = opts.Comment - v.commented = opts.Commented - v.multiline = opts.Multiline - v.literal = opts.Literal - toInsert = v - default: - toInsert = &tomlValue{value: value, - comment: opts.Comment, - commented: opts.Commented, - multiline: opts.Multiline, - literal: opts.Literal, - position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} - } - - subtree.values[keys[len(keys)-1]] = toInsert -} - -// Set an element in the tree. -// Key is a dot-separated path (e.g. a.b.c). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) Set(key string, value interface{}) { - t.SetWithComment(key, "", false, value) -} - -// SetWithComment is the same as Set, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { - t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) -} - -// SetPath sets an element in the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) SetPath(keys []string, value interface{}) { - t.SetPathWithComment(keys, "", false, value) -} - -// SetPathWithComment is the same as SetPath, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { - t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) -} - -// Delete removes a key from the tree. -// Key is a dot-separated path (e.g. a.b.c). -func (t *Tree) Delete(key string) error { - keys, err := parseKey(key) - if err != nil { - return err - } - return t.DeletePath(keys) -} - -// DeletePath removes a key from the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -func (t *Tree) DeletePath(keys []string) error { - keyLen := len(keys) - if keyLen == 1 { - delete(t.values, keys[0]) - return nil - } - tree := t.GetPath(keys[:keyLen-1]) - item := keys[keyLen-1] - switch node := tree.(type) { - case *Tree: - delete(node.values, item) - return nil - } - return errors.New("no such key to delete") -} - -// createSubTree takes a tree and a key and create the necessary intermediate -// subtrees to create a subtree at that point. In-place. -// -// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] -// and tree[a][b][c] -// -// Returns nil on success, error object on failure -func (t *Tree) createSubTree(keys []string, pos Position) error { - subtree := t - for i, intermediateKey := range keys { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - tree.position = pos - tree.inline = subtree.inline - subtree.values[intermediateKey] = tree - nextTree = tree - } - - switch node := nextTree.(type) { - case []*Tree: - subtree = node[len(node)-1] - case *Tree: - subtree = node - default: - return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", - strings.Join(keys, "."), intermediateKey, nextTree, nextTree) - } - } - return nil -} - -// LoadBytes creates a Tree from a []byte. -func LoadBytes(b []byte) (tree *Tree, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = fmt.Errorf("%s", r) - } - }() - - if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { - b = b[4:] - } else if len(b) >= 3 && hasUTF8BOM3(b) { - b = b[3:] - } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { - b = b[2:] - } - - tree = parseToml(lexToml(b)) - return -} - -func hasUTF16BigEndianBOM2(b []byte) bool { - return b[0] == 0xFE && b[1] == 0xFF -} - -func hasUTF16LittleEndianBOM2(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE -} - -func hasUTF8BOM3(b []byte) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -func hasUTF32BigEndianBOM4(b []byte) bool { - return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF -} - -func hasUTF32LittleEndianBOM4(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 -} - -// LoadReader creates a Tree from any io.Reader. -func LoadReader(reader io.Reader) (tree *Tree, err error) { - inputBytes, err := ioutil.ReadAll(reader) - if err != nil { - return - } - tree, err = LoadBytes(inputBytes) - return -} - -// Load creates a Tree from a string. -func Load(content string) (tree *Tree, err error) { - return LoadBytes([]byte(content)) -} - -// LoadFile creates a Tree from a file. -func LoadFile(path string) (tree *Tree, err error) { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - return LoadReader(file) -} diff --git a/vendor/github.com/pelletier/go-toml/tomlpub.go b/vendor/github.com/pelletier/go-toml/tomlpub.go deleted file mode 100644 index 4136b46254..0000000000 --- a/vendor/github.com/pelletier/go-toml/tomlpub.go +++ /dev/null @@ -1,71 +0,0 @@ -package toml - -// PubTOMLValue wrapping tomlValue in order to access all properties from outside. 
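As a reference for what this removal drops, here is a minimal sketch of how the v1 `Tree` API deleted above (`Load`, `Get`, `GetDefault`, `Set`, `ToTomlString`) is typically used. The import path, document contents, and key names here are illustrative assumptions, not part of this diff.

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml" // canonical v1 path; the vendor tree above uses a mirrored path
)

func main() {
	// Load parses a TOML string into a *Tree (LoadBytes, LoadReader, and
	// LoadFile are the byte, reader, and file variants shown above).
	tree, err := toml.Load("[server]\nhost = \"127.0.0.1\"\nport = 8080\n")
	if err != nil {
		panic(err)
	}

	// Get walks a dot-separated key path; GetDefault supplies a fallback
	// when the path does not exist.
	fmt.Println(tree.Get("server.host"))              // 127.0.0.1
	fmt.Println(tree.GetDefault("server.tls", false)) // false

	// Set creates intermediate tables as needed.
	tree.Set("server.tls", true)

	// ToTomlString re-encodes the tree as a TOML document.
	s, err := tree.ToTomlString()
	if err != nil {
		panic(err)
	}
	fmt.Print(s)
}
```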
-type PubTOMLValue = tomlValue - -func (ptv *PubTOMLValue) Value() interface{} { - return ptv.value -} -func (ptv *PubTOMLValue) Comment() string { - return ptv.comment -} -func (ptv *PubTOMLValue) Commented() bool { - return ptv.commented -} -func (ptv *PubTOMLValue) Multiline() bool { - return ptv.multiline -} -func (ptv *PubTOMLValue) Position() Position { - return ptv.position -} - -func (ptv *PubTOMLValue) SetValue(v interface{}) { - ptv.value = v -} -func (ptv *PubTOMLValue) SetComment(s string) { - ptv.comment = s -} -func (ptv *PubTOMLValue) SetCommented(c bool) { - ptv.commented = c -} -func (ptv *PubTOMLValue) SetMultiline(m bool) { - ptv.multiline = m -} -func (ptv *PubTOMLValue) SetPosition(p Position) { - ptv.position = p -} - -// PubTree wrapping Tree in order to access all properties from outside. -type PubTree = Tree - -func (pt *PubTree) Values() map[string]interface{} { - return pt.values -} - -func (pt *PubTree) Comment() string { - return pt.comment -} - -func (pt *PubTree) Commented() bool { - return pt.commented -} - -func (pt *PubTree) Inline() bool { - return pt.inline -} - -func (pt *PubTree) SetValues(v map[string]interface{}) { - pt.values = v -} - -func (pt *PubTree) SetComment(c string) { - pt.comment = c -} - -func (pt *PubTree) SetCommented(c bool) { - pt.commented = c -} - -func (pt *PubTree) SetInline(i bool) { - pt.inline = i -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go deleted file mode 100644 index 80353500a0..0000000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_create.go +++ /dev/null @@ -1,155 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "time" -) - -var kindToType = [reflect.String + 1]reflect.Type{ - reflect.Bool: reflect.TypeOf(true), - reflect.String: reflect.TypeOf(""), - reflect.Float32: reflect.TypeOf(float64(1)), - reflect.Float64: reflect.TypeOf(float64(1)), - reflect.Int: reflect.TypeOf(int64(1)), - reflect.Int8: reflect.TypeOf(int64(1)), - reflect.Int16: reflect.TypeOf(int64(1)), - reflect.Int32: reflect.TypeOf(int64(1)), - reflect.Int64: reflect.TypeOf(int64(1)), - reflect.Uint: reflect.TypeOf(uint64(1)), - reflect.Uint8: reflect.TypeOf(uint64(1)), - reflect.Uint16: reflect.TypeOf(uint64(1)), - reflect.Uint32: reflect.TypeOf(uint64(1)), - reflect.Uint64: reflect.TypeOf(uint64(1)), -} - -// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. 
-// supported values: -// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 -func typeFor(k reflect.Kind) reflect.Type { - if k > 0 && int(k) < len(kindToType) { - return kindToType[k] - } - return nil -} - -func simpleValueCoercion(object interface{}) (interface{}, error) { - switch original := object.(type) { - case string, bool, int64, uint64, float64, time.Time: - return original, nil - case int: - return int64(original), nil - case int8: - return int64(original), nil - case int16: - return int64(original), nil - case int32: - return int64(original), nil - case uint: - return uint64(original), nil - case uint8: - return uint64(original), nil - case uint16: - return uint64(original), nil - case uint32: - return uint64(original), nil - case float32: - return float64(original), nil - case fmt.Stringer: - return original.String(), nil - case []interface{}: - value := reflect.ValueOf(original) - length := value.Len() - arrayValue := reflect.MakeSlice(value.Type(), 0, length) - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return arrayValue.Interface(), nil - default: - return nil, fmt.Errorf("cannot convert type %T to Tree", object) - } -} - -func sliceToTree(object interface{}) (interface{}, error) { - // arrays are a bit tricky, since they can represent either a - // collection of simple values, which is represented by one - // *tomlValue, or an array of tables, which is represented by an - // array of *Tree. - - // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice - value := reflect.ValueOf(object) - insideType := value.Type().Elem() - length := value.Len() - if length > 0 { - insideType = reflect.ValueOf(value.Index(0).Interface()).Type() - } - if insideType.Kind() == reflect.Map { - // this is considered as an array of tables - tablesArray := make([]*Tree, 0, length) - for i := 0; i < length; i++ { - table := value.Index(i) - tree, err := toTree(table.Interface()) - if err != nil { - return nil, err - } - tablesArray = append(tablesArray, tree.(*Tree)) - } - return tablesArray, nil - } - - sliceType := typeFor(insideType.Kind()) - if sliceType == nil { - sliceType = insideType - } - - arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) - - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil -} - -func toTree(object interface{}) (interface{}, error) { - value := reflect.ValueOf(object) - - if value.Kind() == reflect.Map { - values := map[string]interface{}{} - keys := value.MapKeys() - for _, key := range keys { - if key.Kind() != reflect.String { - if _, ok := key.Interface().(string); !ok { - return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) - } - } - - v := value.MapIndex(key) - newValue, err := toTree(v.Interface()) - if err != nil { - return nil, err - } - values[key.String()] = newValue - } - return &Tree{values: values, position: Position{}}, nil - } - - if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { - return sliceToTree(object) - } - - simpleValue, 
err := simpleValueCoercion(object) - if err != nil { - return nil, err - } - return &tomlValue{value: simpleValue, position: Position{}}, nil -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go deleted file mode 100644 index c9afbdab76..0000000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ /dev/null @@ -1,552 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "io" - "math" - "math/big" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type valueComplexity int - -const ( - valueSimple valueComplexity = iota + 1 - valueComplex -) - -type sortNode struct { - key string - complexity valueComplexity -} - -// Encodes a string to a TOML-compliant multi-line string value -// This function is a clone of the existing encodeTomlString function, except that whitespace characters -// are preserved. Quotation marks and backslashes are also not escaped. -func encodeMultilineTomlString(value string, commented string) string { - var b bytes.Buffer - adjacentQuoteCount := 0 - - b.WriteString(commented) - for i, rr := range value { - if rr != '"' { - adjacentQuoteCount = 0 - } else { - adjacentQuoteCount++ - } - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString("\t") - case '\n': - b.WriteString("\n" + commented) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString("\r") - case '"': - if adjacentQuoteCount >= 3 || i == len(value)-1 { - adjacentQuoteCount = 0 - b.WriteString(`\"`) - } else { - b.WriteString(`"`) - } - case '\\': - b.WriteString(`\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -// Encodes a string to a TOML-compliant string value -func encodeTomlString(value string) string { - var b bytes.Buffer - - for _, rr := range value { - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString(`\t`) - case '\n': - b.WriteString(`\n`) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString(`\r`) - case '"': - b.WriteString(`\"`) - case '\\': - b.WriteString(`\\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) { - var orderedVals []sortNode - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - var values []string - for _, node := range orderedVals { - k := node.key - v := t.values[k] - - repr, err := tomlValueStringRepresentation(v, "", "", ord, false) - if err != nil { - return "", err - } - values = append(values, quoteKeyIfNeeded(k)+" = "+repr) - } - return "{ " + strings.Join(values, ", ") + " }", nil -} - -func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { - // this interface check is added to dereference the change made in the writeTo function. - // That change was made to allow this function to see formatting options. 
- tv, ok := v.(*tomlValue) - if ok { - v = tv.value - } else { - tv = &tomlValue{} - } - - switch value := v.(type) { - case uint64: - return strconv.FormatUint(value, 10), nil - case int64: - return strconv.FormatInt(value, 10), nil - case float64: - // Default bit length is full 64 - bits := 64 - // Float panics if nan is used - if !math.IsNaN(value) { - // if 32 bit accuracy is enough to exactly show, use 32 - _, acc := big.NewFloat(value).Float32() - if acc == big.Exact { - bits = 32 - } - } - if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil - } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil - case string: - if tv.multiline { - if tv.literal { - b := strings.Builder{} - b.WriteString("'''\n") - b.Write([]byte(value)) - b.WriteString("\n'''") - return b.String(), nil - } else { - return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil - } - } - return "\"" + encodeTomlString(value) + "\"", nil - case []byte: - b, _ := v.([]byte) - return string(b), nil - case bool: - if value { - return "true", nil - } - return "false", nil - case time.Time: - return value.Format(time.RFC3339), nil - case LocalDate: - return value.String(), nil - case LocalDateTime: - return value.String(), nil - case LocalTime: - return value.String(), nil - case *Tree: - return tomlTreeStringRepresentation(value, ord) - case nil: - return "", nil - } - - rv := reflect.ValueOf(v) - - if rv.Kind() == reflect.Slice { - var values []string - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return "", err - } - values = append(values, itemRepr) - } - if arraysOneElementPerLine && len(values) > 1 { - stringBuffer := bytes.Buffer{} - valueIndent := indent + ` ` // TODO: move that to a shared encoder state - - stringBuffer.WriteString("[\n") - - for _, value := range values { - stringBuffer.WriteString(valueIndent) - stringBuffer.WriteString(commented + value) - stringBuffer.WriteString(`,`) - stringBuffer.WriteString("\n") - } - - stringBuffer.WriteString(indent + commented + "]") - - return stringBuffer.String(), nil - } - return "[" + strings.Join(values, ", ") + "]", nil - } - return "", fmt.Errorf("unsupported value type %T: %v", v, v) -} - -func getTreeArrayLine(trees []*Tree) (line int) { - // Prevent returning 0 for empty trees - line = int(^uint(0) >> 1) - // get lowest line number >= 0 - for _, tv := range trees { - if tv.position.Line < line || line == 0 { - line = tv.position.Line - } - } - return -} - -func sortByLines(t *Tree) (vals []sortNode) { - var ( - line int - lines []int - tv *Tree - tom *tomlValue - node sortNode - ) - vals = make([]sortNode, 0) - m := make(map[int]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree: - tv = v.(*Tree) - line = tv.position.Line - node = sortNode{key: k, complexity: valueComplex} - case []*Tree: - line = getTreeArrayLine(v.([]*Tree)) - node = sortNode{key: k, complexity: valueComplex} - default: - tom = v.(*tomlValue) - line = tom.position.Line - node = sortNode{key: k, complexity: valueSimple} - } - lines = append(lines, line) - vals = append(vals, node) - m[line] = node - } - sort.Ints(lines) - - for i, line := range lines { - vals[i] = m[line] - } - - return vals -} - -func sortAlphabetical(t *Tree) (vals []sortNode) { - var ( - node sortNode - simpVals []string - compVals []string - ) - 
vals = make([]sortNode, 0) - m := make(map[string]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree, []*Tree: - node = sortNode{key: k, complexity: valueComplex} - compVals = append(compVals, node.key) - default: - node = sortNode{key: k, complexity: valueSimple} - simpVals = append(simpVals, node.key) - } - vals = append(vals, node) - m[node.key] = node - } - - // Simples first to match previous implementation - sort.Strings(simpVals) - i := 0 - for _, key := range simpVals { - vals[i] = m[key] - i++ - } - - sort.Strings(compVals) - for _, key := range compVals { - vals[i] = m[key] - i++ - } - - return vals -} - -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false) -} - -func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) { - var orderedVals []sortNode - - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - for _, node := range orderedVals { - switch node.complexity { - case valueComplex: - k := node.key - v := t.values[k] - - combinedKey := quoteKeyIfNeeded(k) - if keyspace != "" { - combinedKey = keyspace + "." + combinedKey - } - - switch node := v.(type) { - // node has to be of those two types given how keys are sorted above - case *Tree: - tv, ok := t.values[k].(*Tree) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - var commented string - if parentCommented || t.commented || tv.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented) - if err != nil { - return bytesCount, err - } - case []*Tree: - for _, subTree := range node { - var commented string - if parentCommented || t.commented || subTree.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - - bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented) - if err != nil { - return bytesCount, err - } - } - } - default: // Simple - k := node.key - v, ok := t.values[k].(*tomlValue) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - - var commented string - if parentCommented || t.commented || v.commented { - commented = "# " - } - repr, err := tomlValueStringRepresentation(v, 
commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - if !compactComments { - writtenBytesCountComment, errc := writeStrings(w, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - quotedKey := quoteKeyIfNeeded(k) - writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - } - } - - return bytesCount, nil -} - -// quote a key if it does not fit the bare key format (A-Za-z0-9_-) -// quoted keys use the same rules as strings -func quoteKeyIfNeeded(k string) string { - // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain - // keys that have already been quoted. - // not an ideal situation, but good enough of a stop gap. - if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { - return k - } - isBare := true - for _, r := range k { - if !isValidBareChar(r) { - isBare = false - break - } - } - if isBare { - return k - } - return quoteKey(k) -} - -func quoteKey(k string) string { - return "\"" + encodeTomlString(k) + "\"" -} - -func writeStrings(w io.Writer, s ...string) (int, error) { - var n int - for i := range s { - b, err := io.WriteString(w, s[i]) - n += b - if err != nil { - return n, err - } - } - return n, nil -} - -// WriteTo encode the Tree as Toml and writes it to the writer w. -// Returns the number of bytes written in case of success, or an error if anything happened. -func (t *Tree) WriteTo(w io.Writer) (int64, error) { - return t.writeTo(w, "", "", 0, false) -} - -// ToTomlString generates a human-readable representation of the current tree. -// Output spans multiple lines, and is suitable for ingest by a TOML parser. -// If the conversion cannot be performed, ToString returns a non-nil error. -func (t *Tree) ToTomlString() (string, error) { - b, err := t.Marshal() - if err != nil { - return "", err - } - return string(b), nil -} - -// String generates a human-readable representation of the current tree. -// Alias of ToString. Present to implement the fmt.Stringer interface. -func (t *Tree) String() string { - result, _ := t.ToTomlString() - return result -} - -// ToMap recursively generates a representation of the tree using Go built-in structures. 
-// The following types are used: -// -// * bool -// * float64 -// * int64 -// * string -// * uint64 -// * time.Time -// * map[string]interface{} (where interface{} is any of this list) -// * []interface{} (where interface{} is any of this list) -func (t *Tree) ToMap() map[string]interface{} { - result := map[string]interface{}{} - - for k, v := range t.values { - switch node := v.(type) { - case []*Tree: - var array []interface{} - for _, item := range node { - array = append(array, item.ToMap()) - } - result[k] = array - case *Tree: - result[k] = node.ToMap() - case *tomlValue: - result[k] = tomlValueToGo(node.value) - } - } - return result -} - -func tomlValueToGo(v interface{}) interface{} { - if tree, ok := v.(*Tree); ok { - return tree.ToMap() - } - - rv := reflect.ValueOf(v) - - if rv.Kind() != reflect.Slice { - return v - } - values := make([]interface{}, rv.Len()) - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - values[i] = tomlValueToGo(item) - } - return values -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go deleted file mode 100644 index fa326308cf..0000000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go +++ /dev/null @@ -1,6 +0,0 @@ -package toml - -// ValueStringRepresentation transforms an interface{} value into its toml string representation. -func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { - return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) -} diff --git a/vendor/github.com/pelletier/go-toml/v2/.dockerignore b/vendor/github.com/pelletier/go-toml/v2/.dockerignore deleted file mode 100644 index 7b5883475d..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -cmd/tomll/tomll -cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitattributes b/vendor/github.com/pelletier/go-toml/v2/.gitattributes deleted file mode 100644 index 34a0a21a36..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/.gitattributes +++ /dev/null @@ -1,4 +0,0 @@ -* text=auto - -benchmark/benchmark.toml text eol=lf -testdata/** text eol=lf diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitignore b/vendor/github.com/pelletier/go-toml/v2/.gitignore deleted file mode 100644 index a69e2b0ebd..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -test_program/test_program_bin -fuzz/ -cmd/tomll/tomll -cmd/tomljson/tomljson -cmd/tomltestgen/tomltestgen -dist \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/v2/.golangci.toml b/vendor/github.com/pelletier/go-toml/v2/.golangci.toml deleted file mode 100644 index 067db55174..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/.golangci.toml +++ /dev/null @@ -1,84 +0,0 @@ -[service] -golangci-lint-version = "1.39.0" - -[linters-settings.wsl] -allow-assign-and-anything = true - -[linters-settings.exhaustive] -default-signifies-exhaustive = true - -[linters] -disable-all = true -enable = [ - "asciicheck", - "bodyclose", - "cyclop", - "deadcode", - "depguard", - "dogsled", - "dupl", - "durationcheck", - "errcheck", - "errorlint", - "exhaustive", - # "exhaustivestruct", - "exportloopref", - "forbidigo", - # "forcetypeassert", - "funlen", - "gci", 
- # "gochecknoglobals", - "gochecknoinits", - "gocognit", - "goconst", - "gocritic", - "gocyclo", - "godot", - "godox", - # "goerr113", - "gofmt", - "gofumpt", - "goheader", - "goimports", - "golint", - "gomnd", - # "gomoddirectives", - "gomodguard", - "goprintffuncname", - "gosec", - "gosimple", - "govet", - # "ifshort", - "importas", - "ineffassign", - "lll", - "makezero", - "misspell", - "nakedret", - "nestif", - "nilerr", - # "nlreturn", - "noctx", - "nolintlint", - #"paralleltest", - "prealloc", - "predeclared", - "revive", - "rowserrcheck", - "sqlclosecheck", - "staticcheck", - "structcheck", - "stylecheck", - # "testpackage", - "thelper", - "tparallel", - "typecheck", - "unconvert", - "unparam", - "unused", - "varcheck", - "wastedassign", - "whitespace", - # "wrapcheck", - # "wsl" -] diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml deleted file mode 100644 index 1d8b69e65e..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml +++ /dev/null @@ -1,126 +0,0 @@ -before: - hooks: - - go mod tidy - - go fmt ./... - - go test ./... -builds: - - id: tomll - main: ./cmd/tomll - binary: tomll - env: - - CGO_ENABLED=0 - flags: - - -trimpath - ldflags: - - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} - mod_timestamp: '{{ .CommitTimestamp }}' - targets: - - linux_amd64 - - linux_arm64 - - linux_arm - - linux_riscv64 - - windows_amd64 - - windows_arm64 - - windows_arm - - darwin_amd64 - - darwin_arm64 - - id: tomljson - main: ./cmd/tomljson - binary: tomljson - env: - - CGO_ENABLED=0 - flags: - - -trimpath - ldflags: - - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} - mod_timestamp: '{{ .CommitTimestamp }}' - targets: - - linux_amd64 - - linux_arm64 - - linux_arm - - linux_riscv64 - - windows_amd64 - - windows_arm64 - - windows_arm - - darwin_amd64 - - darwin_arm64 - - id: jsontoml - main: ./cmd/jsontoml - binary: jsontoml - env: - - CGO_ENABLED=0 - flags: - - -trimpath - ldflags: - - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} - mod_timestamp: '{{ .CommitTimestamp }}' - targets: - - linux_amd64 - - linux_arm64 - - linux_riscv64 - - linux_arm - - windows_amd64 - - windows_arm64 - - windows_arm - - darwin_amd64 - - darwin_arm64 -universal_binaries: - - id: tomll - replace: true - name_template: tomll - - id: tomljson - replace: true - name_template: tomljson - - id: jsontoml - replace: true - name_template: jsontoml -archives: -- id: jsontoml - format: tar.xz - builds: - - jsontoml - files: - - none* - name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" -- id: tomljson - format: tar.xz - builds: - - tomljson - files: - - none* - name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" -- id: tomll - format: tar.xz - builds: - - tomll - files: - - none* - name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" -dockers: - - id: tools - goos: linux - goarch: amd64 - ids: - - jsontoml - - tomljson - - tomll - image_templates: - - "ghcr.io/pelletier/go-toml:latest" - - "ghcr.io/pelletier/go-toml:{{ .Tag }}" - - "ghcr.io/pelletier/go-toml:v{{ .Major }}" - skip_push: false -checksum: - name_template: 'sha256sums.txt' -snapshot: - name_template: "{{ incpatch .Version }}-next" -release: - github: - owner: pelletier - name: go-toml - draft: true - prerelease: auto - mode: replace -changelog: - use: github-native -announce: - skip: true 
diff --git a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md
deleted file mode 100644
index 04dd12bcbc..0000000000
--- a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md
+++ /dev/null
@@ -1,196 +0,0 @@
-# Contributing
-
-Thank you for your interest in go-toml! We appreciate you considering
-contributing to go-toml!
-
-The main goal of the project is to provide an easy-to-use and efficient TOML
-implementation for Go that gets the job done and gets out of your way – dealing
-with TOML is probably not the central piece of your project.
-
-As the single maintainer of go-toml, time is scarce. All help, big or small, is
-more than welcomed!
-
-## Ask questions
-
-Any question you may have, somebody else might have it too. Always feel free to
-ask them on the [discussion board][discussions]. We will try to answer them as
-clearly and quickly as possible, time permitting.
-
-Asking questions also helps us identify areas where the documentation needs
-improvement, or new features that weren't envisioned before. Sometimes, a
-seemingly innocent question leads to the fix of a bug. Don't hesitate and ask
-away!
-
-[discussions]: https://github.com/pelletier/go-toml/discussions
-
-## Improve the documentation
-
-The best way to share your knowledge and experience with go-toml is to improve
-the documentation. Fix a typo, clarify an interface, add an example, anything
-goes!
-
-The documentation is present in the [README][readme] and throughout the source
-code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change
-to the documentation, create a pull request with your proposed changes. For
-simple changes like that, the easiest way to go is probably the "Fork this
-project and edit the file" button on GitHub, displayed at the top right of the
-file. Unless it's a trivial change (for example a typo), provide a little bit of
-context in your pull request description or commit message.
-
-## Report a bug
-
-Found a bug! Sorry to hear that :(. Help us and others track them down and fix
-them by reporting it. [File a new bug report][bug-report] on the [issues
-tracker][issues-tracker]. The template should provide enough guidance on what to
-include. When in doubt: add more details! By reducing ambiguity and providing
-more information, it decreases back and forth and saves everyone time.
-
-## Code changes
-
-Want to contribute a patch? Very happy to hear that!
-
-First, some high-level rules:
-
-- A short proposal with some POC code is better than a lengthy piece of text
-  with no code. Code speaks louder than words. That being said, bigger changes
-  should probably start with a [discussion][discussions].
-- No backward-incompatible patch will be accepted unless discussed. Sometimes
-  it's hard, but we try not to break people's programs unless we absolutely have
-  to.
-- If you are writing a new feature or extending an existing one, make sure to
-  write some documentation.
-- Bug fixes need to be accompanied by regression tests.
-- New code needs to be tested.
-- Your commit messages need to explain why the change is needed, even if already
-  included in the PR description.
-
-It does sound like a lot, but those best practices are here to save time overall
-and continuously improve the quality of the project, which is something everyone
-benefits from.
-
-### Get started
-
-The fairly standard code contribution process looks like this:
-
-1. [Fork the project][fork].
-2. Make your changes, commit on any branch you like.
-3. [Open up a pull request][pull-request]
-4. Review; potentially ask for changes.
-5. Merge.
-
-Feel free to ask for help! You can create draft pull requests to gather
-some early feedback!
-
-### Run the tests
-
-You can run tests for go-toml using Go's test tool: `go test -race ./...`.
-
-During the pull request process, all tests will be run on Linux, Windows, and
-macOS on the last two versions of Go.
-
-However, given GitHub's new policy to _not_ run Actions on pull requests until a
-maintainer clicks a button, it is highly recommended that you run them locally
-as you make changes.
-
-### Check coverage
-
-We use `go tool cover` to compute test coverage. Most code editors have a way to
-run and display code coverage, but at the end of the day, we do this:
-
-```
-go test -covermode=atomic -coverprofile=coverage.out
-go tool cover -func=coverage.out
-```
-
-and verify that the overall percentage of tested code does not go down. This is
-a requirement. As a rule of thumb, all lines of code touched by your changes
-should be covered. On Unix you can use `./ci.sh coverage -d v2` to check if your
-code lowers the coverage.
-
-### Verify performance
-
-Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's
-builtin benchmark systems. Because of their noisy nature, containers provided by
-GitHub Actions cannot be reliably used for benchmarking. As a result, you are
-responsible for checking that your changes do not incur a performance penalty.
-You can run the following to execute benchmarks:
-
-```
-go test ./... -bench=. -count=10
-```
-
-Benchmark results should be compared against each other with
-[benchstat][benchstat]. A typical flow looks like this:
-
-1. On the `v2` branch, run `go test ./... -bench=. -count 10` and save output to
-   a file (for example `old.txt`).
-2. Make some code changes.
-3. Run `go test ....` again, and save the output to another file (for example
-   `new.txt`).
-4. Run `benchstat old.txt new.txt` to check that time/op does not go up in any
-   test.
-
-On Unix you can use `./ci.sh benchmark -d v2` to verify how your code impacts
-performance.
-
-It is highly encouraged to add the benchstat results to your pull request
-description. Pull requests that lower performance will receive more scrutiny.
-
-[benchstat]: https://pkg.go.dev/golang.org/x/perf/cmd/benchstat
-
-### Style
-
-Try to look around and follow the same format and structure as the rest of the
-code. We enforce using `go fmt` on the whole code base.
-
----
-
-## Maintainers-only
-
-### Merge pull request
-
-Checklist:
-
-- Passing CI.
-- Does not introduce backward-incompatible changes (unless discussed).
-- Has relevant doc changes.
-- Benchstat does not show performance regression.
-- Pull request is [labeled appropriately][pr-labels].
-- Title will be understandable in the changelog.
-
-1. Merge using "squash and merge".
-2. Make sure to edit the commit message to keep all the useful information
-   nice and clean.
-3. Make sure the commit title is clear and contains the PR number (#123).
-
-### New release
-
-1. Decide on the next version number. Use semver.
-2. Generate release notes using [`gh`][gh]. Example:
-```
-$ gh api -X POST \
-    -F tag_name='v2.0.0-beta.5' \
-    -F target_commitish='v2' \
-    -F previous_tag_name='v2.0.0-beta.4' \
-    --jq '.body' \
-    repos/pelletier/go-toml/releases/generate-notes
-```
-3. Look for "Other changes". That would indicate a pull request not labeled
-   properly.
Tweak labels and pull request titles until changelog looks good for - users. -4. [Draft new release][new-release]. -5. Fill tag and target with the same value used to generate the changelog. -6. Set title to the new tag value. -7. Paste the generated changelog. -8. Check "create discussion", in the "Releases" category. -9. Check pre-release if new version is an alpha or beta. - -[issues-tracker]: https://github.com/pelletier/go-toml/issues -[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md -[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml -[readme]: ./README.md -[fork]: https://help.github.com/articles/fork-a-repo -[pull-request]: https://help.github.com/en/articles/creating-a-pull-request -[new-release]: https://github.com/pelletier/go-toml/releases/new -[gh]: https://github.com/cli/cli -[pr-labels]: https://github.com/pelletier/go-toml/blob/v2/.github/release.yml diff --git a/vendor/github.com/pelletier/go-toml/v2/Dockerfile b/vendor/github.com/pelletier/go-toml/v2/Dockerfile deleted file mode 100644 index b9e9332379..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM scratch -ENV PATH "$PATH:/bin" -COPY tomll /bin/tomll -COPY tomljson /bin/tomljson -COPY jsontoml /bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/v2/LICENSE b/vendor/github.com/pelletier/go-toml/v2/LICENSE deleted file mode 100644 index 991e2ae966..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -go-toml v2 -Copyright (c) 2021 - 2023 Thomas Pelletier - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md deleted file mode 100644 index 63b92f3b0b..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/README.md +++ /dev/null @@ -1,575 +0,0 @@ -# go-toml v2 - -Go library for the [TOML](https://toml.io/en/) format. - -This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0). - -[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues) - -[💬 Anything else](https://github.com/pelletier/go-toml/discussions) - -## Documentation - -Full API, examples, and implementation notes are available in the Go -documentation. 
-
-[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml/v2.svg)](https://pkg.go.dev/github.com/pelletier/go-toml/v2)
-
-## Import
-
-```go
-import "github.com/pelletier/go-toml/v2"
-```
-
-See [Modules](#Modules).
-
-## Features
-
-### Stdlib behavior
-
-As much as possible, this library is designed to behave similarly to the
-standard library's `encoding/json`.
-
-### Performance
-
-While go-toml favors usability, it is written with performance in mind. Most
-operations should not be shockingly slow. See [benchmarks](#benchmarks).
-
-### Strict mode
-
-`Decoder` can be set to "strict mode", which makes it return an error when parts
-of the TOML document are not present in the target structure. This is a great
-way to check for typos. [See example in the documentation][strict].
-
-[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields
-
-### Contextualized errors
-
-When most decoding errors occur, go-toml returns [`DecodeError`][decode-err],
-which contains a human-readable, contextualized version of the error. For
-example:
-
-```
-1| [server]
-2| path = 100
- | ~~~ cannot decode TOML integer into struct field toml_test.Server.Path of type string
-3| port = 50
-```
-
-[decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError
-
-### Local date and time support
-
-TOML supports native [local date/times][ldt]. They represent a given date,
-time, or date-time without relation to a timezone or offset. To support this
-use-case, go-toml provides [`LocalDate`][tld], [`LocalTime`][tlt], and
-[`LocalDateTime`][tldt]. Those types can be transformed to and from `time.Time`,
-making them convenient yet unambiguous structures for their respective TOML
-representation.
-
-[ldt]: https://toml.io/en/v1.0.0#local-date-time
-[tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate
-[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime
-[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime
-
-### Commented config
-
-Since TOML is often used for configuration files, go-toml can emit documents
-annotated with [comments and commented-out values][comments-example]. For
-example, it can generate the following file:
-
-```toml
-# Host IP to connect to.
-host = '127.0.0.1'
-# Port of the remote server.
-port = 4242
-
-# Encryption parameters (optional)
-# [TLS]
-# cipher = 'AEAD-AES128-GCM-SHA256'
-# version = 'TLS 1.3'
-```
-
-[comments-example]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Marshal-Commented
-
-## Getting started
-
-Given the following struct, let's see how to read it and write it as TOML:
-
-```go
-type MyConfig struct {
-	Version int
-	Name    string
-	Tags    []string
-}
-```
-
-### Unmarshaling
-
-[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its
-content. For example:
-
-```go
-doc := `
-version = 2
-name = "go-toml"
-tags = ["go", "toml"]
-`
-
-var cfg MyConfig
-err := toml.Unmarshal([]byte(doc), &cfg)
-if err != nil {
-	panic(err)
-}
-fmt.Println("version:", cfg.Version)
-fmt.Println("name:", cfg.Name)
-fmt.Println("tags:", cfg.Tags)
-
-// Output:
-// version: 2
-// name: go-toml
-// tags: [go toml]
-```
-
-[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal
-
-### Marshaling
-
-[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure
-as a TOML document:
-
-```go
-cfg := MyConfig{
-	Version: 2,
-	Name:    "go-toml",
-	Tags:    []string{"go", "toml"},
-}
-
-b, err := toml.Marshal(cfg)
-if err != nil {
-	panic(err)
-}
-fmt.Println(string(b))
-
-// Output:
-// Version = 2
-// Name = 'go-toml'
-// Tags = ['go', 'toml']
-```
-
-[marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal
-
-## Unstable API
-
-These APIs do not yet follow the backward compatibility guarantees of this
-library. They provide early access to features that may have rough edges or an
-API subject to change.
-
-### Parser
-
-Parser is the unstable API that allows iterative parsing of a TOML document at
-the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable.
-
-## Benchmarks
-
-Execution time speedup compared to other Go TOML libraries:
-
-| Benchmark                        | go-toml v1 | BurntSushi/toml |
-|----------------------------------|------------|-----------------|
-| Marshal/HugoFrontMatter-2        | 1.9x       | 1.9x            |
-| Marshal/ReferenceFile/map-2      | 1.7x       | 1.8x            |
-| Marshal/ReferenceFile/struct-2   | 2.2x       | 2.5x            |
-| Unmarshal/HugoFrontMatter-2      | 2.9x       | 2.9x            |
-| Unmarshal/ReferenceFile/map-2    | 2.6x       | 2.9x            |
-| Unmarshal/ReferenceFile/struct-2 | 4.4x       | 5.3x            |
-
-**See more.** The table above has the results of the most common use-cases. The
-table below contains the results of all benchmarks, including unrealistic ones.
-It is provided for completeness.
-
-| Benchmark                       | go-toml v1 | BurntSushi/toml |
-|---------------------------------|------------|-----------------|
-| Marshal/SimpleDocument/map-2    | 1.8x       | 2.9x            |
-| Marshal/SimpleDocument/struct-2 | 2.7x       | 4.2x            |
-| Unmarshal/SimpleDocument/map-2  | 4.5x       | 3.1x            |
-| Unmarshal/SimpleDocument/struct-2 | 6.2x     | 3.9x            |
-| UnmarshalDataset/example-2      | 3.1x       | 3.5x            |
-| UnmarshalDataset/code-2         | 2.3x       | 3.1x            |
-| UnmarshalDataset/twitter-2      | 2.5x       | 2.6x            |
-| UnmarshalDataset/citm_catalog-2 | 2.1x       | 2.2x            |
-| UnmarshalDataset/canada-2       | 1.6x       | 1.3x            |
-| UnmarshalDataset/config-2       | 4.3x       | 3.2x            |
-| [Geo mean]                      | 2.7x       | 2.8x            |
-
-This table can be generated with `./ci.sh benchmark -a -html`.
-
-## Modules
-
-go-toml uses Go's standard modules system.
-
-Installation instructions:
-
-- Go ≥ 1.16: Nothing to do. Use the import in your code. The `go` command deals
-  with it automatically.
-- Go ≥ 1.13: `GO111MODULE=on go get github.com/pelletier/go-toml/v2`.
-
-In case of trouble: [Go Modules FAQ][mod-faq].
-
-[mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module
-
-## Tools
-
-Go-toml provides three handy command line tools:
-
- * `tomljson`: Reads a TOML file and outputs its JSON representation.
-
-   ```
-   $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest
-   $ tomljson --help
-   ```
-
- * `jsontoml`: Reads a JSON file and outputs a TOML representation.
-
-   ```
-   $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest
-   $ jsontoml --help
-   ```
-
- * `tomll`: Lints and reformats a TOML file.
-
-   ```
-   $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest
-   $ tomll --help
-   ```
-
-### Docker image
-
-Those tools are also available as a [Docker image][docker]. For example, to use
-`tomljson`:
-
-```
-docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml
-```
-
-Multiple versions are available on [ghcr.io][docker].
-
-[docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml
-
-## Migrating from v1
-
-This section describes the differences between v1 and v2, with some pointers on
-how to get the original behavior when possible.
-
-### Decoding / Unmarshal
-
-#### Automatic field name guessing
-
-When unmarshaling to a struct, if a key in the TOML document does not exactly
-match the name of a struct field or any of the `toml`-tagged fields, v1 tries
-multiple variations of the key ([code][v1-keys]).
-
-V2 instead does case-insensitive matching, like `encoding/json`.
-
-This could impact you if you are relying on casing to differentiate two fields,
-and one of them is not using the `toml` struct tag. The recommended solution is
-to be specific about tag names for those fields using the `toml` struct tag.
-
-[v1-keys]: https://github.com/pelletier/go-toml/blob/a2e52561804c6cd9392ebf0048ca64fe4af67a43/marshal.go#L775-L781
-
-#### Ignore preexisting value in interface
-
-When decoding into a non-nil `interface{}`, go-toml v1 uses the type of the
-element in the interface to decode the object. For example:
-
-```go
-type inner struct {
-	B interface{}
-}
-type doc struct {
-	A interface{}
-}
-
-d := doc{
-	A: inner{
-		B: "Before",
-	},
-}
-
-data := `
-[A]
-B = "After"
-`
-
-toml.Unmarshal([]byte(data), &d)
-fmt.Printf("toml v1: %#v\n", d)
-
-// toml v1: main.doc{A:main.inner{B:"After"}}
-```
-
-In this case, field `A` is of type `interface{}`, containing an `inner` struct.
-V1 sees that type and uses it when decoding the object.
-
-When decoding an object into an `interface{}`, V2 instead disregards whatever
-value the `interface{}` may contain and replaces it with a
-`map[string]interface{}`. With the same data structure as above, here is what
-the result looks like:
-
-```go
-toml.Unmarshal([]byte(data), &d)
-fmt.Printf("toml v2: %#v\n", d)
-
-// toml v2: main.doc{A:map[string]interface {}{"B":"After"}}
-```
-
-This is to match `encoding/json`'s behavior. There is no way to make the v2
-decoder behave like v1.
-
-#### Values out of array bounds ignored
-
-When decoding into an array, v1 returns an error when the number of elements
-
-```go
-type doc struct {
-    A [2]string
-}
-d := doc{}
-err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
-fmt.Println(err)
-
-// (1, 1): unmarshal: TOML array length (3) exceeds destination array length (2)
-```
-
-In the same situation, v2 ignores the last value:
-
-```go
-err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
-fmt.Println("err:", err, "d:", d)
-// err: <nil> d: {[one two]}
-```
-
-This is to match `encoding/json`'s behavior. There is no way to make the v2
-decoder behave like v1.
-
-#### Support for `toml.Unmarshaler` has been dropped
-
-This method was not widely used, poorly defined, and added a lot of complexity.
-A similar effect can be achieved by implementing the `encoding.TextUnmarshaler`
-interface and using strings.
-
-#### Support for `default` struct tag has been dropped
-
-This feature adds complexity and a poorly defined API for an effect that can be
-accomplished outside of the library.
-
-It does not seem like other format parsers in Go support that feature (the
-project referenced in the original ticket #202 has not been updated since 2017).
-Given that go-toml v2 should not touch values not in the document, the same
-effect can be achieved by pre-filling the struct with defaults (libraries like
-[go-defaults][go-defaults] can help). Also, string representation is not well
-defined for all types: it creates issues like #278.
-
-The recommended replacement is pre-filling the struct before unmarshaling.
-
-[go-defaults]: https://github.com/mcuadros/go-defaults
-
-#### `toml.Tree` replacement
-
-This structure was the initial attempt at providing a document model for
-go-toml. It allowed manipulating the structure of any document, and encoding and
-decoding it from its TOML representation. While a more robust feature was
-initially planned in go-toml v2, this has been ultimately [removed from
-scope][nodoc] of this library, with no plan to add it back at the moment. The
-closest equivalent at the moment would be to unmarshal into an `interface{}` and
-use type assertions and/or reflection to manipulate the arbitrary
-structure. However, this would fall short of providing all of the TOML features,
-such as adding comments and being specific about whitespace.
-
-#### `toml.Position` is not retrievable anymore
-
-The API for retrieving the position (line, column) of a specific TOML element no
-longer exists. This was done to minimize the number of concepts introduced
-by the library (query path), and avoid the performance hit related to storing
-positions in the absence of a document model, for a feature that seemed to have
-little use. Errors however have gained more detailed position
-information. Position retrieval seems better fitted for a document model, which
-has been [removed from the scope][nodoc] of go-toml v2 at the moment.
-
-### Encoding / Marshal
-
-#### Default struct fields order
-
-V1 emits struct fields in alphabetical order by default. V2 emits struct fields
-in the order they are defined. For example:
-
-```go
-type S struct {
-    B string
-    A string
-}
-
-data := S{
-    B: "B",
-    A: "A",
-}
-
-b, _ := tomlv1.Marshal(data)
-fmt.Println("v1:\n" + string(b))
-
-b, _ = tomlv2.Marshal(data)
-fmt.Println("v2:\n" + string(b))
-
-// Output:
-// v1:
-// A = "A"
-// B = "B"
-
-// v2:
-// B = 'B'
-// A = 'A'
-```
-
-There is no way to make the v2 encoder behave like v1. A workaround could be to
-manually sort the fields alphabetically in the struct definition, or to generate
-struct types using `reflect.StructOf`, as in the sketch below.
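Below is a minimal, illustrative sketch of that `reflect.StructOf` workaround. It is not part of go-toml; it assumes the canonical upstream import path `github.com/pelletier/go-toml/v2` and simply rebuilds the struct type with its fields sorted by name before marshaling:

```go
package main

import (
	"fmt"
	"reflect"
	"sort"

	toml "github.com/pelletier/go-toml/v2"
)

type S struct {
	B string
	A string
}

// sortedCopy builds a new struct type whose fields are v's fields in
// alphabetical order, and copies v's values into it.
func sortedCopy(v interface{}) interface{} {
	rv := reflect.ValueOf(v)
	rt := rv.Type()

	// Collect the field definitions and sort them by name.
	fields := make([]reflect.StructField, rt.NumField())
	for i := range fields {
		fields[i] = rt.Field(i)
	}
	sort.Slice(fields, func(i, j int) bool { return fields[i].Name < fields[j].Name })

	// Build the sorted struct type and copy the values over.
	out := reflect.New(reflect.StructOf(fields)).Elem()
	for _, f := range fields {
		out.FieldByName(f.Name).Set(rv.FieldByName(f.Name))
	}
	return out.Interface()
}

func main() {
	b, _ := toml.Marshal(sortedCopy(S{B: "B", A: "A"}))
	fmt.Print(string(b))
	// Fields are now emitted in alphabetical order:
	// A = 'A'
	// B = 'B'
}
```

Note that `reflect.StructOf` panics on unexported fields, so this approach only works for structs whose fields are all exported.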
-
-#### No indentation by default
-
-V1 automatically indents the content of tables by default. V2 does not.
-However, the same behavior can be obtained using
-[`Encoder.SetIndentTables`][sit]. For example:
-
-```go
-data := map[string]interface{}{
-    "table": map[string]string{
-        "key": "value",
-    },
-}
-
-b, _ := tomlv1.Marshal(data)
-fmt.Println("v1:\n" + string(b))
-
-b, _ = tomlv2.Marshal(data)
-fmt.Println("v2:\n" + string(b))
-
-buf := bytes.Buffer{}
-enc := tomlv2.NewEncoder(&buf)
-enc.SetIndentTables(true)
-enc.Encode(data)
-fmt.Println("v2 Encoder:\n" + string(buf.Bytes()))
-
-// Output:
-// v1:
-//
-// [table]
-//   key = "value"
-//
-// v2:
-// [table]
-// key = 'value'
-//
-//
-// v2 Encoder:
-// [table]
-//   key = 'value'
-```
-
-[sit]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Encoder.SetIndentTables
-
-#### Keys and strings are single quoted
-
-V1 always uses double quotes (`"`) around strings and keys that cannot be
-represented bare (unquoted). V2 uses single quotes (`'`) by default instead,
-unless a character cannot be represented that way, in which case it falls back
-to double quotes. As a result of this change, `Encoder.QuoteMapKeys` has been
-removed, as it is not useful anymore.
-
-There is no way to make the v2 encoder behave like v1.
-
-#### `TextMarshaler` emits as a string, not TOML
-
-Types that implement [`encoding.TextMarshaler`][tm] can emit arbitrary TOML in
-v1. The encoder would append the result to the output directly. In v2 the result
-is wrapped in a string. As a result, this interface cannot be implemented by the
-root object.
-
-There is no way to make the v2 encoder behave like v1.
-
-[tm]: https://golang.org/pkg/encoding/#TextMarshaler
-
-#### `Encoder.CompactComments` has been removed
-
-Emitting compact comments is now the default behavior of go-toml. This option
-is not necessary anymore.
-
-#### Struct tags have been merged
-
-V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`,
-`toml`, and `omitempty`. To behave more like the standard library, v2 has merged
-`toml`, `multiline`, `commented`, and `omitempty`. For example:
-
-```go
-type doc struct {
-    // v1
-    F string `toml:"field" multiline:"true" omitempty:"true" commented:"true"`
-    // v2
-    F string `toml:"field,multiline,omitempty,commented"`
-}
-```
-
-As a result, the `Encoder.SetTag*` methods have been removed, as there is just
-one tag now.
-
-#### `Encoder.ArraysWithOneElementPerLine` has been renamed
-
-The new name is `Encoder.SetArraysMultiline`. The behavior should be the same.
-
-#### `Encoder.Indentation` has been renamed
-
-The new name is `Encoder.SetIndentSymbol`. The behavior should be the same.
-
-#### Embedded structs behave like stdlib
-
-V1 defaults to merging embedded struct fields into the embedding struct. This
-behavior was unexpected because it does not follow the standard library. To
-avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method was
-added to make the encoder behave correctly. Given that backward compatibility is
-no longer a concern, v2 does the right thing by default: it follows the behavior
-of `encoding/json`, as sketched below. `Encoder.PromoteAnonymous` has been
-removed.
-
-[nodoc]: https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038
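To make the new default concrete, here is a small, illustrative sketch (not from the original README; it assumes the canonical upstream import path `github.com/pelletier/go-toml/v2`) of the `encoding/json`-style treatment of embedded struct fields described above:

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml/v2"
)

type Base struct {
	UUID string
}

type Derived struct {
	Base        // embedded: exported fields are promoted, as encoding/json does
	Name string
}

func main() {
	b, _ := toml.Marshal(Derived{
		Base: Base{UUID: "0001"},
		Name: "example",
	})
	fmt.Print(string(b))
	// Expected output (fields promoted to the top level, no [Base] table):
	// UUID = '0001'
	// Name = 'example'
}
```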
-
-### `query`
-
-go-toml v1 provided the [`go-toml/query`][query] package. It allowed running
-JSONPath-style queries on TOML files. This feature is not available in v2. For a
-replacement, check out [dasel][dasel].
-
-This package has been removed because it was essentially no longer maintained
-(last commit May 2020), it increased the complexity of the code base, and more
-complete solutions exist elsewhere.
-
-[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query
-[dasel]: https://github.com/TomWright/dasel
-
-## Versioning
-
-Go-toml follows [Semantic Versioning](https://semver.org). The supported version
-of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
-this document. The last two major versions of Go are supported
-(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).
-
-## License
-
-The MIT License (MIT). Read [LICENSE](LICENSE).
diff --git a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
deleted file mode 100644
index b2f21cfc92..0000000000
--- a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Security Policy
-
-## Supported Versions
-
-Use this section to tell people about which versions of your project are
-currently being supported with security updates.
-
-| Version    | Supported          |
-| ---------- | ------------------ |
-| Latest 2.x | :white_check_mark: |
-| All 1.x    | :x:                |
-| All 0.x    | :x:                |
-
-## Reporting a Vulnerability
-
-Email a vulnerability report to `security@pelletier.codes`. Make sure to include
-as many details as possible to reproduce the vulnerability. This is a
-side-project: I will try to get back to you as quickly as possible, time
-permitting in my personal life. Providing a working patch helps very much!
diff --git a/vendor/github.com/pelletier/go-toml/v2/ci.sh b/vendor/github.com/pelletier/go-toml/v2/ci.sh
deleted file mode 100644
index 9ae8b75375..0000000000
--- a/vendor/github.com/pelletier/go-toml/v2/ci.sh
+++ /dev/null
@@ -1,280 +0,0 @@
-#!/usr/bin/env bash
-
-
-stderr() {
-    echo "$@" 1>&2
-}
-
-usage() {
-    b=$(basename "$0")
-    echo $b: ERROR: "$@" 1>&2
-
-    cat 1>&2 < coverage.out
-    go tool cover -func=coverage.out
-    echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2
-    popd
-
-    if [ "${branch}" != "HEAD" ]; then
-        git worktree remove --force "$dir"
-    fi
-}
-
-coverage() {
-    case "$1" in
-    -d)
-        shift
-        target="${1?Need to provide a target branch argument}"
-
-        output_dir="$(mktemp -d)"
-        target_out="${output_dir}/target.txt"
-        head_out="${output_dir}/head.txt"
-
-        cover "${target}" > "${target_out}"
-        cover "HEAD" > "${head_out}"
-
-        cat "${target_out}"
-        cat "${head_out}"
-
-        echo ""
-
-        target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')"
-        head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%/\1/')"
-        echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%"
-
-        delta_pct=$(echo "$head_pct - $target_pct" | bc -l)
-        echo "Delta: ${delta_pct}"
-
-        if [[ $delta_pct = \-* ]]; then
-            echo "Regression!";
-
-            target_diff="${output_dir}/target.diff.txt"
-            head_diff="${output_dir}/head.diff.txt"
-            cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}"
-            cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}"
-
-            diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}"
-            return 1
-        fi
-        return 0
-        ;;
-    esac
-
-    cover "${1-HEAD}"
-}
-
-bench() {
-    branch="${1}"
-    out="${2}"
-    replace="${3}"
dir="$(mktemp -d)" - - stderr "Executing benchmark for ${branch} at ${dir}" - - if [ "${branch}" = "HEAD" ]; then - cp -r . "${dir}/" - else - git worktree add "$dir" "$branch" - fi - - pushd "$dir" - - if [ "${replace}" != "" ]; then - find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \; - go get "${replace}" - fi - - export GOMAXPROCS=2 - nice -n -19 taskset --cpu-list 0,1 go test '-bench=^Benchmark(Un)?[mM]arshal' -count=5 -run=Nothing ./... | tee "${out}" - popd - - if [ "${branch}" != "HEAD" ]; then - git worktree remove --force "$dir" - fi -} - -fmktemp() { - if mktemp --version|grep GNU >/dev/null; then - mktemp --suffix=-$1; - else - mktemp -t $1; - fi -} - -benchstathtml() { -python3 - $1 <<'EOF' -import sys - -lines = [] -stop = False - -with open(sys.argv[1]) as f: - for line in f.readlines(): - line = line.strip() - if line == "": - stop = True - if not stop: - lines.append(line.split(',')) - -results = [] -for line in reversed(lines[1:]): - v2 = float(line[1]) - results.append([ - line[0].replace("-32", ""), - "%.1fx" % (float(line[3])/v2), # v1 - "%.1fx" % (float(line[5])/v2), # bs - ]) -# move geomean to the end -results.append(results[0]) -del results[0] - - -def printtable(data): - print(""" - - - - - """) - - for r in data: - print(" ".format(*r)) - - print(""" -
Benchmarkgo-toml v1BurntSushi/toml
{}{}{}
""") - - -def match(x): - return "ReferenceFile" in x[0] or "HugoFrontMatter" in x[0] - -above = [x for x in results if match(x)] -below = [x for x in results if not match(x)] - -printtable(above) -print("
See more") -print("""

The table above has the results of the most common use-cases. The table below -contains the results of all benchmarks, including unrealistic ones. It is -provided for completeness.

""") -printtable(below) -print('

This table can be generated with ./ci.sh benchmark -a -html.

') -print("
") - -EOF -} - -benchmark() { - case "$1" in - -d) - shift - target="${1?Need to provide a target branch argument}" - - old=`fmktemp ${target}` - bench "${target}" "${old}" - - new=`fmktemp HEAD` - bench HEAD "${new}" - - benchstat "${old}" "${new}" - return 0 - ;; - -a) - shift - - v2stats=`fmktemp go-toml-v2` - bench HEAD "${v2stats}" "github.com/pelletier/go-toml/v2" - v1stats=`fmktemp go-toml-v1` - bench HEAD "${v1stats}" "github.com/pelletier/go-toml" - bsstats=`fmktemp bs-toml` - bench HEAD "${bsstats}" "github.com/BurntSushi/toml" - - cp "${v2stats}" go-toml-v2.txt - cp "${v1stats}" go-toml-v1.txt - cp "${bsstats}" bs-toml.txt - - if [ "$1" = "-html" ]; then - tmpcsv=`fmktemp csv` - benchstat -csv -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv - benchstathtml $tmpcsv - else - benchstat -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt - fi - - rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt - return $? - esac - - bench "${1-HEAD}" `mktemp` -} - -case "$1" in - coverage) shift; coverage $@;; - benchmark) shift; benchmark $@;; - *) usage "bad argument $1";; -esac diff --git a/vendor/github.com/pelletier/go-toml/v2/decode.go b/vendor/github.com/pelletier/go-toml/v2/decode.go deleted file mode 100644 index f0ec3b1705..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/decode.go +++ /dev/null @@ -1,550 +0,0 @@ -package toml - -import ( - "fmt" - "math" - "strconv" - "time" - - "github.com/pelletier/go-toml/v2/unstable" -) - -func parseInteger(b []byte) (int64, error) { - if len(b) > 2 && b[0] == '0' { - switch b[1] { - case 'x': - return parseIntHex(b) - case 'b': - return parseIntBin(b) - case 'o': - return parseIntOct(b) - default: - panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1])) - } - } - - return parseIntDec(b) -} - -func parseLocalDate(b []byte) (LocalDate, error) { - // full-date = date-fullyear "-" date-month "-" date-mday - // date-fullyear = 4DIGIT - // date-month = 2DIGIT ; 01-12 - // date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year - var date LocalDate - - if len(b) != 10 || b[4] != '-' || b[7] != '-' { - return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD") - } - - var err error - - date.Year, err = parseDecimalDigits(b[0:4]) - if err != nil { - return LocalDate{}, err - } - - date.Month, err = parseDecimalDigits(b[5:7]) - if err != nil { - return LocalDate{}, err - } - - date.Day, err = parseDecimalDigits(b[8:10]) - if err != nil { - return LocalDate{}, err - } - - if !isValidDate(date.Year, date.Month, date.Day) { - return LocalDate{}, unstable.NewParserError(b, "impossible date") - } - - return date, nil -} - -func parseDecimalDigits(b []byte) (int, error) { - v := 0 - - for i, c := range b { - if c < '0' || c > '9' { - return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)") - } - v *= 10 - v += int(c - '0') - } - - return v, nil -} - -func parseDateTime(b []byte) (time.Time, error) { - // offset-date-time = full-date time-delim full-time - // full-time = partial-time time-offset - // time-offset = "Z" / time-numoffset - // time-numoffset = ( "+" / "-" ) time-hour ":" time-minute - - dt, b, err := parseLocalDateTime(b) - if err != nil { - return time.Time{}, err - } - - var zone *time.Location - - if len(b) == 0 { - // parser should have checked that when assigning the date time node - panic("date time should have a timezone") - } - - if b[0] == 'Z' || b[0] == 'z' { - b = b[1:] - zone = 
time.UTC - } else { - const dateTimeByteLen = 6 - if len(b) != dateTimeByteLen { - return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone") - } - var direction int - switch b[0] { - case '-': - direction = -1 - case '+': - direction = +1 - default: - return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character") - } - - if b[3] != ':' { - return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator") - } - - hours, err := parseDecimalDigits(b[1:3]) - if err != nil { - return time.Time{}, err - } - if hours > 23 { - return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours") - } - - minutes, err := parseDecimalDigits(b[4:6]) - if err != nil { - return time.Time{}, err - } - if minutes > 59 { - return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes") - } - - seconds := direction * (hours*3600 + minutes*60) - if seconds == 0 { - zone = time.UTC - } else { - zone = time.FixedZone("", seconds) - } - b = b[dateTimeByteLen:] - } - - if len(b) > 0 { - return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone") - } - - t := time.Date( - dt.Year, - time.Month(dt.Month), - dt.Day, - dt.Hour, - dt.Minute, - dt.Second, - dt.Nanosecond, - zone) - - return t, nil -} - -func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) { - var dt LocalDateTime - - const localDateTimeByteMinLen = 11 - if len(b) < localDateTimeByteMinLen { - return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]") - } - - date, err := parseLocalDate(b[:10]) - if err != nil { - return dt, nil, err - } - dt.LocalDate = date - - sep := b[10] - if sep != 'T' && sep != ' ' && sep != 't' { - return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space") - } - - t, rest, err := parseLocalTime(b[11:]) - if err != nil { - return dt, nil, err - } - dt.LocalTime = t - - return dt, rest, nil -} - -// parseLocalTime is a bit different because it also returns the remaining -// []byte that is didn't need. This is to allow parseDateTime to parse those -// remaining bytes as a timezone. -func parseLocalTime(b []byte) (LocalTime, []byte, error) { - var ( - nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0} - t LocalTime - ) - - // check if b matches to have expected format HH:MM:SS[.NNNNNN] - const localTimeByteLen = 8 - if len(b) < localTimeByteLen { - return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]") - } - - var err error - - t.Hour, err = parseDecimalDigits(b[0:2]) - if err != nil { - return t, nil, err - } - - if t.Hour > 23 { - return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater 23") - } - if b[2] != ':' { - return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes") - } - - t.Minute, err = parseDecimalDigits(b[3:5]) - if err != nil { - return t, nil, err - } - if t.Minute > 59 { - return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater 59") - } - if b[5] != ':' { - return t, nil, unstable.NewParserError(b[5:6], "expecting colon between minutes and seconds") - } - - t.Second, err = parseDecimalDigits(b[6:8]) - if err != nil { - return t, nil, err - } - - if t.Second > 60 { - return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater 60") - } - - b = b[8:] - - if len(b) >= 1 && b[0] == '.' 
{ - frac := 0 - precision := 0 - digits := 0 - - for i, c := range b[1:] { - if !isDigit(c) { - if i == 0 { - return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point") - } - break - } - digits++ - - const maxFracPrecision = 9 - if i >= maxFracPrecision { - // go-toml allows decoding fractional seconds - // beyond the supported precision of 9 - // digits. It truncates the fractional component - // to the supported precision and ignores the - // remaining digits. - // - // https://github.com/pelletier/go-toml/discussions/707 - continue - } - - frac *= 10 - frac += int(c - '0') - precision++ - } - - if precision == 0 { - return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit") - } - - t.Nanosecond = frac * nspow[precision] - t.Precision = precision - - return t, b[1+digits:], nil - } - return t, b, nil -} - -//nolint:cyclop -func parseFloat(b []byte) (float64, error) { - if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' { - return math.NaN(), nil - } - - cleaned, err := checkAndRemoveUnderscoresFloats(b) - if err != nil { - return 0, err - } - - if cleaned[0] == '.' { - return 0, unstable.NewParserError(b, "float cannot start with a dot") - } - - if cleaned[len(cleaned)-1] == '.' { - return 0, unstable.NewParserError(b, "float cannot end with a dot") - } - - dotAlreadySeen := false - for i, c := range cleaned { - if c == '.' { - if dotAlreadySeen { - return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point") - } - if !isDigit(cleaned[i-1]) { - return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit") - } - if !isDigit(cleaned[i+1]) { - return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit") - } - dotAlreadySeen = true - } - } - - start := 0 - if cleaned[0] == '+' || cleaned[0] == '-' { - start = 1 - } - if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) { - return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes") - } - - f, err := strconv.ParseFloat(string(cleaned), 64) - if err != nil { - return 0, unstable.NewParserError(b, "unable to parse float: %w", err) - } - - return f, nil -} - -func parseIntHex(b []byte) (int64, error) { - cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) - if err != nil { - return 0, err - } - - i, err := strconv.ParseInt(string(cleaned), 16, 64) - if err != nil { - return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err) - } - - return i, nil -} - -func parseIntOct(b []byte) (int64, error) { - cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) - if err != nil { - return 0, err - } - - i, err := strconv.ParseInt(string(cleaned), 8, 64) - if err != nil { - return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err) - } - - return i, nil -} - -func parseIntBin(b []byte) (int64, error) { - cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) - if err != nil { - return 0, err - } - - i, err := strconv.ParseInt(string(cleaned), 2, 64) - if err != nil { - return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err) - } - - return i, nil -} - -func isSign(b byte) bool { - return b == '+' || b == '-' -} - -func parseIntDec(b []byte) (int64, error) { - cleaned, err := checkAndRemoveUnderscoresIntegers(b) - if err != nil { - return 0, err - } - - startIdx := 0 - - if isSign(cleaned[0]) { - startIdx++ - } - - if len(cleaned) > 
startIdx+1 && cleaned[startIdx] == '0' { - return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number") - } - - i, err := strconv.ParseInt(string(cleaned), 10, 64) - if err != nil { - return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err) - } - - return i, nil -} - -func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) { - start := 0 - if b[start] == '+' || b[start] == '-' { - start++ - } - - if len(b) == start { - return b, nil - } - - if b[start] == '_' { - return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore") - } - - if b[len(b)-1] == '_' { - return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") - } - - // fast path - i := 0 - for ; i < len(b); i++ { - if b[i] == '_' { - break - } - } - if i == len(b) { - return b, nil - } - - before := false - cleaned := make([]byte, i, len(b)) - copy(cleaned, b) - - for i++; i < len(b); i++ { - c := b[i] - if c == '_' { - if !before { - return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") - } - before = false - } else { - before = true - cleaned = append(cleaned, c) - } - } - - return cleaned, nil -} - -func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { - if b[0] == '_' { - return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore") - } - - if b[len(b)-1] == '_' { - return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") - } - - // fast path - i := 0 - for ; i < len(b); i++ { - if b[i] == '_' { - break - } - } - if i == len(b) { - return b, nil - } - - before := false - cleaned := make([]byte, 0, len(b)) - - for i := 0; i < len(b); i++ { - c := b[i] - - switch c { - case '_': - if !before { - return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") - } - if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') { - return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent") - } - before = false - case '+', '-': - // signed exponents - cleaned = append(cleaned, c) - before = false - case 'e', 'E': - if i < len(b)-1 && b[i+1] == '_' { - return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent") - } - cleaned = append(cleaned, c) - case '.': - if i < len(b)-1 && b[i+1] == '_' { - return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point") - } - if i > 0 && b[i-1] == '_' { - return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point") - } - cleaned = append(cleaned, c) - default: - before = true - cleaned = append(cleaned, c) - } - } - - return cleaned, nil -} - -// isValidDate checks if a provided date is a date that exists. -func isValidDate(year int, month int, day int) bool { - return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year) -} - -// daysBefore[m] counts the number of days in a non-leap year -// before month m begins. There is an entry for m=12, counting -// the number of days before January of next year (365). 
-var daysBefore = [...]int32{ - 0, - 31, - 31 + 28, - 31 + 28 + 31, - 31 + 28 + 31 + 30, - 31 + 28 + 31 + 30 + 31, - 31 + 28 + 31 + 30 + 31 + 30, - 31 + 28 + 31 + 30 + 31 + 30 + 31, - 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31, - 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30, - 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31, - 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30, - 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31, -} - -func daysIn(m int, year int) int { - if m == 2 && isLeap(year) { - return 29 - } - return int(daysBefore[m] - daysBefore[m-1]) -} - -func isLeap(year int) bool { - return year%4 == 0 && (year%100 != 0 || year%400 == 0) -} - -func isDigit(r byte) bool { - return r >= '0' && r <= '9' -} diff --git a/vendor/github.com/pelletier/go-toml/v2/doc.go b/vendor/github.com/pelletier/go-toml/v2/doc.go deleted file mode 100644 index b7bc599bde..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package toml is a library to read and write TOML documents. -package toml diff --git a/vendor/github.com/pelletier/go-toml/v2/errors.go b/vendor/github.com/pelletier/go-toml/v2/errors.go deleted file mode 100644 index 309733f1f9..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/errors.go +++ /dev/null @@ -1,252 +0,0 @@ -package toml - -import ( - "fmt" - "strconv" - "strings" - - "github.com/pelletier/go-toml/v2/internal/danger" - "github.com/pelletier/go-toml/v2/unstable" -) - -// DecodeError represents an error encountered during the parsing or decoding -// of a TOML document. -// -// In addition to the error message, it contains the position in the document -// where it happened, as well as a human-readable representation that shows -// where the error occurred in the document. -type DecodeError struct { - message string - line int - column int - key Key - - human string -} - -// StrictMissingError occurs in a TOML document that does not have a -// corresponding field in the target value. It contains all the missing fields -// in Errors. -// -// Emitted by Decoder when DisallowUnknownFields() was called. -type StrictMissingError struct { - // One error per field that could not be found. - Errors []DecodeError -} - -// Error returns the canonical string for this error. -func (s *StrictMissingError) Error() string { - return "strict mode: fields in the document are missing in the target struct" -} - -// String returns a human readable description of all errors. -func (s *StrictMissingError) String() string { - var buf strings.Builder - - for i, e := range s.Errors { - if i > 0 { - buf.WriteString("\n---\n") - } - - buf.WriteString(e.String()) - } - - return buf.String() -} - -type Key []string - -// Error returns the error message contained in the DecodeError. -func (e *DecodeError) Error() string { - return "toml: " + e.message -} - -// String returns the human-readable contextualized error. This string is multi-line. -func (e *DecodeError) String() string { - return e.human -} - -// Position returns the (line, column) pair indicating where the error -// occurred in the document. Positions are 1-indexed. -func (e *DecodeError) Position() (row int, column int) { - return e.line, e.column -} - -// Key that was being processed when the error occurred. The key is present only -// if this DecodeError is part of a StrictMissingError. 
-func (e *DecodeError) Key() Key { - return e.key -} - -// decodeErrorFromHighlight creates a DecodeError referencing a highlighted -// range of bytes from document. -// -// highlight needs to be a sub-slice of document, or this function panics. -// -// The function copies all bytes used in DecodeError, so that document and -// highlight can be freely deallocated. -// -//nolint:funlen -func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError { - offset := danger.SubsliceOffset(document, de.Highlight) - - errMessage := de.Error() - errLine, errColumn := positionAtEnd(document[:offset]) - before, after := linesOfContext(document, de.Highlight, offset, 3) - - var buf strings.Builder - - maxLine := errLine + len(after) - 1 - lineColumnWidth := len(strconv.Itoa(maxLine)) - - // Write the lines of context strictly before the error. - for i := len(before) - 1; i > 0; i-- { - line := errLine - i - buf.WriteString(formatLineNumber(line, lineColumnWidth)) - buf.WriteString("|") - - if len(before[i]) > 0 { - buf.WriteString(" ") - buf.Write(before[i]) - } - - buf.WriteRune('\n') - } - - // Write the document line that contains the error. - - buf.WriteString(formatLineNumber(errLine, lineColumnWidth)) - buf.WriteString("| ") - - if len(before) > 0 { - buf.Write(before[0]) - } - - buf.Write(de.Highlight) - - if len(after) > 0 { - buf.Write(after[0]) - } - - buf.WriteRune('\n') - - // Write the line with the error message itself (so it does not have a line - // number). - - buf.WriteString(strings.Repeat(" ", lineColumnWidth)) - buf.WriteString("| ") - - if len(before) > 0 { - buf.WriteString(strings.Repeat(" ", len(before[0]))) - } - - buf.WriteString(strings.Repeat("~", len(de.Highlight))) - - if len(errMessage) > 0 { - buf.WriteString(" ") - buf.WriteString(errMessage) - } - - // Write the lines of context strictly after the error. - - for i := 1; i < len(after); i++ { - buf.WriteRune('\n') - line := errLine + i - buf.WriteString(formatLineNumber(line, lineColumnWidth)) - buf.WriteString("|") - - if len(after[i]) > 0 { - buf.WriteString(" ") - buf.Write(after[i]) - } - } - - return &DecodeError{ - message: errMessage, - line: errLine, - column: errColumn, - key: de.Key, - human: buf.String(), - } -} - -func formatLineNumber(line int, width int) string { - format := "%" + strconv.Itoa(width) + "d" - - return fmt.Sprintf(format, line) -} - -func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) { - return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround) -} - -func beforeLines(document []byte, offset int, linesAround int) [][]byte { - var beforeLines [][]byte - - // Walk the document backward from the highlight to find previous lines - // of context. - rest := document[:offset] -backward: - for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; { - switch { - case rest[o] == '\n': - // handle individual lines - beforeLines = append(beforeLines, rest[o+1:]) - rest = rest[:o] - o = len(rest) - 1 - case o == 0: - // add the first line only if it's non-empty - beforeLines = append(beforeLines, rest) - - break backward - default: - o-- - } - } - - return beforeLines -} - -func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte { - var afterLines [][]byte - - // Walk the document forward from the highlight to find the following - // lines of context. 
- rest := document[offset+len(highlight):] -forward: - for o := 0; o < len(rest) && len(afterLines) <= linesAround; { - switch { - case rest[o] == '\n': - // handle individual lines - afterLines = append(afterLines, rest[:o]) - rest = rest[o+1:] - o = 0 - - case o == len(rest)-1: - // add last line only if it's non-empty - afterLines = append(afterLines, rest) - - break forward - default: - o++ - } - } - - return afterLines -} - -func positionAtEnd(b []byte) (row int, column int) { - row = 1 - column = 1 - - for _, c := range b { - if c == '\n' { - row++ - column = 1 - } else { - column++ - } - } - - return -} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go b/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go deleted file mode 100644 index 80f698db4b..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go +++ /dev/null @@ -1,42 +0,0 @@ -package characters - -var invalidAsciiTable = [256]bool{ - 0x00: true, - 0x01: true, - 0x02: true, - 0x03: true, - 0x04: true, - 0x05: true, - 0x06: true, - 0x07: true, - 0x08: true, - // 0x09 TAB - // 0x0A LF - 0x0B: true, - 0x0C: true, - // 0x0D CR - 0x0E: true, - 0x0F: true, - 0x10: true, - 0x11: true, - 0x12: true, - 0x13: true, - 0x14: true, - 0x15: true, - 0x16: true, - 0x17: true, - 0x18: true, - 0x19: true, - 0x1A: true, - 0x1B: true, - 0x1C: true, - 0x1D: true, - 0x1E: true, - 0x1F: true, - // 0x20 - 0x7E Printable ASCII characters - 0x7F: true, -} - -func InvalidAscii(b byte) bool { - return invalidAsciiTable[b] -} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go b/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go deleted file mode 100644 index db4f45acbf..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go +++ /dev/null @@ -1,199 +0,0 @@ -package characters - -import ( - "unicode/utf8" -) - -type utf8Err struct { - Index int - Size int -} - -func (u utf8Err) Zero() bool { - return u.Size == 0 -} - -// Verified that a given string is only made of valid UTF-8 characters allowed -// by the TOML spec: -// -// Any Unicode character may be used except those that must be escaped: -// quotation mark, backslash, and the control characters other than tab (U+0000 -// to U+0008, U+000A to U+001F, U+007F). -// -// It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early -// when a character is not allowed. -// -// The returned utf8Err is Zero() if the string is valid, or contains the byte -// index and size of the invalid character. -// -// quotation mark => already checked -// backslash => already checked -// 0-0x8 => invalid -// 0x9 => tab, ok -// 0xA - 0x1F => invalid -// 0x7F => invalid -func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { - // Fast path. Check for and skip 8 bytes of ASCII characters per iteration. - offset := 0 - for len(p) >= 8 { - // Combining two 32 bit loads allows the same code to be used - // for 32 and 64 bit platforms. - // The compiler can generate a 32bit load for first32 and second32 - // on many platforms. See test/codegen/memcombine.go. - first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 - second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24 - if (first32|second32)&0x80808080 != 0 { - // Found a non ASCII byte (>= RuneSelf). 
- break - } - - for i, b := range p[:8] { - if InvalidAscii(b) { - err.Index = offset + i - err.Size = 1 - return - } - } - - p = p[8:] - offset += 8 - } - n := len(p) - for i := 0; i < n; { - pi := p[i] - if pi < utf8.RuneSelf { - if InvalidAscii(pi) { - err.Index = offset + i - err.Size = 1 - return - } - i++ - continue - } - x := first[pi] - if x == xx { - // Illegal starter byte. - err.Index = offset + i - err.Size = 1 - return - } - size := int(x & 7) - if i+size > n { - // Short or invalid. - err.Index = offset + i - err.Size = n - i - return - } - accept := acceptRanges[x>>4] - if c := p[i+1]; c < accept.lo || accept.hi < c { - err.Index = offset + i - err.Size = 2 - return - } else if size == 2 { - } else if c := p[i+2]; c < locb || hicb < c { - err.Index = offset + i - err.Size = 3 - return - } else if size == 3 { - } else if c := p[i+3]; c < locb || hicb < c { - err.Index = offset + i - err.Size = 4 - return - } - i += size - } - return -} - -// Return the size of the next rune if valid, 0 otherwise. -func Utf8ValidNext(p []byte) int { - c := p[0] - - if c < utf8.RuneSelf { - if InvalidAscii(c) { - return 0 - } - return 1 - } - - x := first[c] - if x == xx { - // Illegal starter byte. - return 0 - } - size := int(x & 7) - if size > len(p) { - // Short or invalid. - return 0 - } - accept := acceptRanges[x>>4] - if c := p[1]; c < accept.lo || accept.hi < c { - return 0 - } else if size == 2 { - } else if c := p[2]; c < locb || hicb < c { - return 0 - } else if size == 3 { - } else if c := p[3]; c < locb || hicb < c { - return 0 - } - - return size -} - -// acceptRange gives the range of valid values for the second byte in a UTF-8 -// sequence. -type acceptRange struct { - lo uint8 // lowest value for second byte. - hi uint8 // highest value for second byte. -} - -// acceptRanges has size 16 to avoid bounds checks in the code that uses it. -var acceptRanges = [16]acceptRange{ - 0: {locb, hicb}, - 1: {0xA0, hicb}, - 2: {locb, 0x9F}, - 3: {0x90, hicb}, - 4: {locb, 0x8F}, -} - -// first is information about the first byte in a UTF-8 sequence. -var first = [256]uint8{ - // 1 2 3 4 5 6 7 8 9 A B C D E F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F - // 1 2 3 4 5 6 7 8 9 A B C D E F - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF - xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF - s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF - s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF - s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF -} - -const ( - // The default lowest and highest continuation byte. 
- locb = 0b10000000 - hicb = 0b10111111 - - // These names of these constants are chosen to give nice alignment in the - // table below. The first nibble is an index into acceptRanges or F for - // special one-byte cases. The second nibble is the Rune length or the - // Status for the special one-byte case. - xx = 0xF1 // invalid: size 1 - as = 0xF0 // ASCII: size 1 - s1 = 0x02 // accept 0, size 2 - s2 = 0x13 // accept 1, size 3 - s3 = 0x03 // accept 0, size 3 - s4 = 0x23 // accept 2, size 3 - s5 = 0x34 // accept 3, size 4 - s6 = 0x04 // accept 0, size 4 - s7 = 0x44 // accept 4, size 4 -) diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go deleted file mode 100644 index e38e1131b8..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go +++ /dev/null @@ -1,65 +0,0 @@ -package danger - -import ( - "fmt" - "reflect" - "unsafe" -) - -const maxInt = uintptr(int(^uint(0) >> 1)) - -func SubsliceOffset(data []byte, subslice []byte) int { - datap := (*reflect.SliceHeader)(unsafe.Pointer(&data)) - hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice)) - - if hlp.Data < datap.Data { - panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data)) - } - offset := hlp.Data - datap.Data - - if offset > maxInt { - panic(fmt.Errorf("slice offset larger than int (%d)", offset)) - } - - intoffset := int(offset) - - if intoffset > datap.Len { - panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len)) - } - - if intoffset+hlp.Len > datap.Len { - panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len)) - } - - return intoffset -} - -func BytesRange(start []byte, end []byte) []byte { - if start == nil || end == nil { - panic("cannot call BytesRange with nil") - } - startp := (*reflect.SliceHeader)(unsafe.Pointer(&start)) - endp := (*reflect.SliceHeader)(unsafe.Pointer(&end)) - - if startp.Data > endp.Data { - panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data)) - } - - l := startp.Len - endLen := int(endp.Data-startp.Data) + endp.Len - if endLen > l { - l = endLen - } - - if l > startp.Cap { - panic(fmt.Errorf("range length is larger than capacity")) - } - - return start[:l] -} - -func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer { - // TODO: replace with unsafe.Add when Go 1.17 is released - // https://github.com/golang/go/issues/40481 - return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset)) -} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go deleted file mode 100644 index 9d41c28a2f..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go +++ /dev/null @@ -1,23 +0,0 @@ -package danger - -import ( - "reflect" - "unsafe" -) - -// typeID is used as key in encoder and decoder caches to enable using -// the optimize runtime.mapaccess2_fast64 function instead of the more -// expensive lookup if we were to use reflect.Type as map key. -// -// typeID holds the pointer to the reflect.Type value, which is unique -// in the program. 
-// -// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61 -type TypeID unsafe.Pointer - -func MakeTypeID(t reflect.Type) TypeID { - // reflect.Type has the fields: - // typ unsafe.Pointer - // ptr unsafe.Pointer - return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1]) -} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go deleted file mode 100644 index 149b17f538..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go +++ /dev/null @@ -1,48 +0,0 @@ -package tracker - -import "github.com/pelletier/go-toml/v2/unstable" - -// KeyTracker is a tracker that keeps track of the current Key as the AST is -// walked. -type KeyTracker struct { - k []string -} - -// UpdateTable sets the state of the tracker with the AST table node. -func (t *KeyTracker) UpdateTable(node *unstable.Node) { - t.reset() - t.Push(node) -} - -// UpdateArrayTable sets the state of the tracker with the AST array table node. -func (t *KeyTracker) UpdateArrayTable(node *unstable.Node) { - t.reset() - t.Push(node) -} - -// Push the given key on the stack. -func (t *KeyTracker) Push(node *unstable.Node) { - it := node.Key() - for it.Next() { - t.k = append(t.k, string(it.Node().Data)) - } -} - -// Pop key from stack. -func (t *KeyTracker) Pop(node *unstable.Node) { - it := node.Key() - for it.Next() { - t.k = t.k[:len(t.k)-1] - } -} - -// Key returns the current key -func (t *KeyTracker) Key() []string { - k := make([]string, len(t.k)) - copy(k, t.k) - return k -} - -func (t *KeyTracker) reset() { - t.k = t.k[:0] -} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go deleted file mode 100644 index 40e23f8304..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go +++ /dev/null @@ -1,356 +0,0 @@ -package tracker - -import ( - "bytes" - "fmt" - "sync" - - "github.com/pelletier/go-toml/v2/unstable" -) - -type keyKind uint8 - -const ( - invalidKind keyKind = iota - valueKind - tableKind - arrayTableKind -) - -func (k keyKind) String() string { - switch k { - case invalidKind: - return "invalid" - case valueKind: - return "value" - case tableKind: - return "table" - case arrayTableKind: - return "array table" - } - panic("missing keyKind string mapping") -} - -// SeenTracker tracks which keys have been seen with which TOML type to flag -// duplicates and mismatches according to the spec. -// -// Each node in the visited tree is represented by an entry. Each entry has an -// identifier, which is provided by a counter. Entries are stored in the array -// entries. As new nodes are discovered (referenced for the first time in the -// TOML document), entries are created and appended to the array. An entry -// points to its parent using its id. -// -// To find whether a given key (sequence of []byte) has already been visited, -// the entries are linearly searched, looking for one with the right name and -// parent id. -// -// Given that all keys appear in the document after their parent, it is -// guaranteed that all descendants of a node are stored after the node, this -// speeds up the search process. -// -// When encountering [[array tables]], the descendants of that node are removed -// to allow that branch of the tree to be "rediscovered". 
To maintain the -// invariant above, the deletion process needs to keep the order of entries. -// This results in more copies in that case. -type SeenTracker struct { - entries []entry - currentIdx int -} - -var pool sync.Pool - -func (s *SeenTracker) reset() { - // Always contains a root element at index 0. - s.currentIdx = 0 - if len(s.entries) == 0 { - s.entries = make([]entry, 1, 2) - } else { - s.entries = s.entries[:1] - } - s.entries[0].child = -1 - s.entries[0].next = -1 -} - -type entry struct { - // Use -1 to indicate no child or no sibling. - child int - next int - - name []byte - kind keyKind - explicit bool - kv bool -} - -// Find the index of the child of parentIdx with key k. Returns -1 if -// it does not exist. -func (s *SeenTracker) find(parentIdx int, k []byte) int { - for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { - if bytes.Equal(s.entries[i].name, k) { - return i - } - } - return -1 -} - -// Remove all descendants of node at position idx. -func (s *SeenTracker) clear(idx int) { - if idx >= len(s.entries) { - return - } - - for i := s.entries[idx].child; i >= 0; { - next := s.entries[i].next - n := s.entries[0].next - s.entries[0].next = i - s.entries[i].next = n - s.entries[i].name = nil - s.clear(i) - i = next - } - - s.entries[idx].child = -1 -} - -func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int { - e := entry{ - child: -1, - next: s.entries[parentIdx].child, - - name: name, - kind: kind, - explicit: explicit, - kv: kv, - } - var idx int - if s.entries[0].next >= 0 { - idx = s.entries[0].next - s.entries[0].next = s.entries[idx].next - s.entries[idx] = e - } else { - idx = len(s.entries) - s.entries = append(s.entries, e) - } - - s.entries[parentIdx].child = idx - - return idx -} - -func (s *SeenTracker) setExplicitFlag(parentIdx int) { - for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { - if s.entries[i].kv { - s.entries[i].explicit = true - s.entries[i].kv = false - } - s.setExplicitFlag(i) - } -} - -// CheckExpression takes a top-level node and checks that it does not contain -// keys that have been seen in previous calls, and validates that types are -// consistent. -func (s *SeenTracker) CheckExpression(node *unstable.Node) error { - if s.entries == nil { - s.reset() - } - switch node.Kind { - case unstable.KeyValue: - return s.checkKeyValue(node) - case unstable.Table: - return s.checkTable(node) - case unstable.ArrayTable: - return s.checkArrayTable(node) - default: - panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind)) - } -} - -func (s *SeenTracker) checkTable(node *unstable.Node) error { - if s.currentIdx >= 0 { - s.setExplicitFlag(s.currentIdx) - } - - it := node.Key() - - parentIdx := 0 - - // This code is duplicated in checkArrayTable. This is because factoring - // it in a function requires to copy the iterator, or allocate it to the - // heap, which is not cheap. 
- for it.Next() { - if it.IsLast() { - break - } - - k := it.Node().Data - - idx := s.find(parentIdx, k) - - if idx < 0 { - idx = s.create(parentIdx, k, tableKind, false, false) - } else { - entry := s.entries[idx] - if entry.kind == valueKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) - } - } - parentIdx = idx - } - - k := it.Node().Data - idx := s.find(parentIdx, k) - - if idx >= 0 { - kind := s.entries[idx].kind - if kind != tableKind { - return fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) - } - if s.entries[idx].explicit { - return fmt.Errorf("toml: table %s already exists", string(k)) - } - s.entries[idx].explicit = true - } else { - idx = s.create(parentIdx, k, tableKind, true, false) - } - - s.currentIdx = idx - - return nil -} - -func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { - if s.currentIdx >= 0 { - s.setExplicitFlag(s.currentIdx) - } - - it := node.Key() - - parentIdx := 0 - - for it.Next() { - if it.IsLast() { - break - } - - k := it.Node().Data - - idx := s.find(parentIdx, k) - - if idx < 0 { - idx = s.create(parentIdx, k, tableKind, false, false) - } else { - entry := s.entries[idx] - if entry.kind == valueKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) - } - } - - parentIdx = idx - } - - k := it.Node().Data - idx := s.find(parentIdx, k) - - if idx >= 0 { - kind := s.entries[idx].kind - if kind != arrayTableKind { - return fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) - } - s.clear(idx) - } else { - idx = s.create(parentIdx, k, arrayTableKind, true, false) - } - - s.currentIdx = idx - - return nil -} - -func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { - parentIdx := s.currentIdx - it := node.Key() - - for it.Next() { - k := it.Node().Data - - idx := s.find(parentIdx, k) - - if idx < 0 { - idx = s.create(parentIdx, k, tableKind, false, true) - } else { - entry := s.entries[idx] - if it.IsLast() { - return fmt.Errorf("toml: key %s is already defined", string(k)) - } else if entry.kind != tableKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) - } else if entry.explicit { - return fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k)) - } - } - - parentIdx = idx - } - - s.entries[parentIdx].kind = valueKind - - value := node.Value() - - switch value.Kind { - case unstable.InlineTable: - return s.checkInlineTable(value) - case unstable.Array: - return s.checkArray(value) - } - - return nil -} - -func (s *SeenTracker) checkArray(node *unstable.Node) error { - it := node.Children() - for it.Next() { - n := it.Node() - switch n.Kind { - case unstable.InlineTable: - err := s.checkInlineTable(n) - if err != nil { - return err - } - case unstable.Array: - err := s.checkArray(n) - if err != nil { - return err - } - } - } - return nil -} - -func (s *SeenTracker) checkInlineTable(node *unstable.Node) error { - if pool.New == nil { - pool.New = func() interface{} { - return &SeenTracker{} - } - } - - s = pool.Get().(*SeenTracker) - s.reset() - - it := node.Children() - for it.Next() { - n := it.Node() - err := s.checkKeyValue(n) - if err != nil { - return err - } - } - - // As inline tables are self-contained, the tracker does not - // need to retain the details of what they contain. 
The - // keyValue element that creates the inline table is kept to - // mark the presence of the inline table and prevent - // redefinition of its keys: check* functions cannot walk into - // a value. - pool.Put(s) - return nil -} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go deleted file mode 100644 index bf0317392f..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go +++ /dev/null @@ -1 +0,0 @@ -package tracker diff --git a/vendor/github.com/pelletier/go-toml/v2/localtime.go b/vendor/github.com/pelletier/go-toml/v2/localtime.go deleted file mode 100644 index a856bfdb0d..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/localtime.go +++ /dev/null @@ -1,122 +0,0 @@ -package toml - -import ( - "fmt" - "strings" - "time" - - "github.com/pelletier/go-toml/v2/unstable" -) - -// LocalDate represents a calendar day in no specific timezone. -type LocalDate struct { - Year int - Month int - Day int -} - -// AsTime converts d into a specific time instance at midnight in zone. -func (d LocalDate) AsTime(zone *time.Location) time.Time { - return time.Date(d.Year, time.Month(d.Month), d.Day, 0, 0, 0, 0, zone) -} - -// String returns RFC 3339 representation of d. -func (d LocalDate) String() string { - return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) -} - -// MarshalText returns RFC 3339 representation of d. -func (d LocalDate) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText parses b using RFC 3339 to fill d. -func (d *LocalDate) UnmarshalText(b []byte) error { - res, err := parseLocalDate(b) - if err != nil { - return err - } - *d = res - return nil -} - -// LocalTime represents a time of day of no specific day in no specific -// timezone. -type LocalTime struct { - Hour int // Hour of the day: [0; 24[ - Minute int // Minute of the hour: [0; 60[ - Second int // Second of the minute: [0; 60[ - Nanosecond int // Nanoseconds within the second: [0, 1000000000[ - Precision int // Number of digits to display for Nanosecond. -} - -// String returns RFC 3339 representation of d. -// If d.Nanosecond and d.Precision are zero, the time won't have a nanosecond -// component. If d.Nanosecond > 0 but d.Precision = 0, then the minimum number -// of digits for nanoseconds is provided. -func (d LocalTime) String() string { - s := fmt.Sprintf("%02d:%02d:%02d", d.Hour, d.Minute, d.Second) - - if d.Precision > 0 { - s += fmt.Sprintf(".%09d", d.Nanosecond)[:d.Precision+1] - } else if d.Nanosecond > 0 { - // Nanoseconds are specified, but precision is not provided. Use the - // minimum. - s += strings.Trim(fmt.Sprintf(".%09d", d.Nanosecond), "0") - } - - return s -} - -// MarshalText returns RFC 3339 representation of d. -func (d LocalTime) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText parses b using RFC 3339 to fill d. -func (d *LocalTime) UnmarshalText(b []byte) error { - res, left, err := parseLocalTime(b) - if err == nil && len(left) != 0 { - err = unstable.NewParserError(left, "extra characters") - } - if err != nil { - return err - } - *d = res - return nil -} - -// LocalDateTime represents a time of a specific day in no specific timezone. -type LocalDateTime struct { - LocalDate - LocalTime -} - -// AsTime converts d into a specific time instance in zone. 
-func (d LocalDateTime) AsTime(zone *time.Location) time.Time { - return time.Date(d.Year, time.Month(d.Month), d.Day, d.Hour, d.Minute, d.Second, d.Nanosecond, zone) -} - -// String returns RFC 3339 representation of d. -func (d LocalDateTime) String() string { - return d.LocalDate.String() + "T" + d.LocalTime.String() -} - -// MarshalText returns RFC 3339 representation of d. -func (d LocalDateTime) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText parses b using RFC 3339 to fill d. -func (d *LocalDateTime) UnmarshalText(data []byte) error { - res, left, err := parseLocalDateTime(data) - if err == nil && len(left) != 0 { - err = unstable.NewParserError(left, "extra characters") - } - if err != nil { - return err - } - - *d = res - return nil -} diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go deleted file mode 100644 index 6fe78533c1..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ /dev/null @@ -1,1090 +0,0 @@ -package toml - -import ( - "bytes" - "encoding" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - "unicode" - - "github.com/pelletier/go-toml/v2/internal/characters" -) - -// Marshal serializes a Go value as a TOML document. -// -// It is a shortcut for Encoder.Encode() with the default options. -func Marshal(v interface{}) ([]byte, error) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - - err := enc.Encode(v) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// Encoder writes a TOML document to an output stream. -type Encoder struct { - // output - w io.Writer - - // global settings - tablesInline bool - arraysMultiline bool - indentSymbol string - indentTables bool -} - -// NewEncoder returns a new Encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - indentSymbol: " ", - } -} - -// SetTablesInline forces the encoder to emit all tables inline. -// -// This behavior can be controlled on an individual struct field basis with the -// inline tag: -// -// MyField `toml:",inline"` -func (enc *Encoder) SetTablesInline(inline bool) *Encoder { - enc.tablesInline = inline - return enc -} - -// SetArraysMultiline forces the encoder to emit all arrays with one element per -// line. -// -// This behavior can be controlled on an individual struct field basis with the multiline tag: -// -// MyField `multiline:"true"` -func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder { - enc.arraysMultiline = multiline - return enc -} - -// SetIndentSymbol defines the string that should be used for indentation. The -// provided string is repeated for each indentation level. Defaults to two -// spaces. -func (enc *Encoder) SetIndentSymbol(s string) *Encoder { - enc.indentSymbol = s - return enc -} - -// SetIndentTables forces the encoder to intent tables and array tables. -func (enc *Encoder) SetIndentTables(indent bool) *Encoder { - enc.indentTables = indent - return enc -} - -// Encode writes a TOML representation of v to the stream. -// -// If v cannot be represented to TOML it returns an error. -// -// # Encoding rules -// -// A top level slice containing only maps or structs is encoded as [[table -// array]]. -// -// All slices not matching rule 1 are encoded as [array]. As a result, any map -// or struct they contain is encoded as an {inline table}. -// -// Nil interfaces and nil pointers are not supported. 
-// -// Keys in key-values always have one part. -// -// Intermediate tables are always printed. -// -// By default, strings are encoded as literal string, unless they contain either -// a newline character or a single quote. In that case they are emitted as -// quoted strings. -// -// Unsigned integers larger than math.MaxInt64 cannot be encoded. Doing so -// results in an error. This rule exists because the TOML specification only -// requires parsers to support at least the 64 bits integer range. Allowing -// larger numbers would create non-standard TOML documents, which may not be -// readable (at best) by other implementations. To encode such numbers, a -// solution is a custom type that implements encoding.TextMarshaler. -// -// When encoding structs, fields are encoded in order of definition, with their -// exact name. -// -// Tables and array tables are separated by empty lines. However, consecutive -// subtables definitions are not. For example: -// -// [top1] -// -// [top2] -// [top2.child1] -// -// [[array]] -// -// [[array]] -// [array.child2] -// -// # Struct tags -// -// The encoding of each public struct field can be customized by the format -// string in the "toml" key of the struct field's tag. This follows -// encoding/json's convention. The format string starts with the name of the -// field, optionally followed by a comma-separated list of options. The name may -// be empty in order to provide options without overriding the default name. -// -// The "multiline" option emits strings as quoted multi-line TOML strings. It -// has no effect on fields that would not be encoded as strings. -// -// The "inline" option turns fields that would be emitted as tables into inline -// tables instead. It has no effect on other fields. -// -// The "omitempty" option prevents empty values or groups from being emitted. -// -// The "commented" option prefixes the value and all its children with a comment -// symbol. -// -// In addition to the "toml" tag struct tag, a "comment" tag can be used to emit -// a TOML comment before the value being annotated. Comments are ignored inside -// inline tables. For array tables, the comment is only present before the first -// element of the array. -func (enc *Encoder) Encode(v interface{}) error { - var ( - b []byte - ctx encoderCtx - ) - - ctx.inline = enc.tablesInline - - if v == nil { - return fmt.Errorf("toml: cannot encode a nil interface") - } - - b, err := enc.encode(b, ctx, reflect.ValueOf(v)) - if err != nil { - return err - } - - _, err = enc.w.Write(b) - if err != nil { - return fmt.Errorf("toml: cannot write: %w", err) - } - - return nil -} - -type valueOptions struct { - multiline bool - omitempty bool - commented bool - comment string -} - -type encoderCtx struct { - // Current top-level key. - parentKey []string - - // Key that should be used for a KV. - key string - // Extra flag to account for the empty string - hasKey bool - - // Set to true to indicate that the encoder is inside a KV, so that all - // tables need to be inlined. - insideKv bool - - // Set to true to skip the first table header in an array table. - skipTableHeader bool - - // Should the next table be encoded as inline - inline bool - - // Indentation level - indent int - - // Prefix the current value with a comment. 
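// Editor's note: a sketch of the struct-tag options documented in the Encode
// doc comment above (multiline, inline, omitempty, "-", and the separate
// "comment" tag). The Config type and its fields are hypothetical; the
// expected output shown in comments follows the documented rules.
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml/v2"
)

type Config struct {
	Name    string            `toml:"name" comment:"service name"`
	Notes   string            `toml:"notes,multiline"`
	Labels  map[string]string `toml:"labels,inline"`
	Debug   bool              `toml:"debug,omitempty"`
	Ignored string            `toml:"-"`
}

func main() {
	b, err := toml.Marshal(Config{
		Name:   "ko",
		Notes:  "line one\nline two",
		Labels: map[string]string{"env": "dev"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b))
	// # service name
	// name = 'ko'
	// notes = """
	// line one
	// line two"""
	// labels = {env = 'dev'}
	//
	// debug is omitted (false + omitempty); Ignored is skipped entirely.
}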
- commented bool - - // Options coming from struct tags - options valueOptions -} - -func (ctx *encoderCtx) shiftKey() { - if ctx.hasKey { - ctx.parentKey = append(ctx.parentKey, ctx.key) - ctx.clearKey() - } -} - -func (ctx *encoderCtx) setKey(k string) { - ctx.key = k - ctx.hasKey = true -} - -func (ctx *encoderCtx) clearKey() { - ctx.key = "" - ctx.hasKey = false -} - -func (ctx *encoderCtx) isRoot() bool { - return len(ctx.parentKey) == 0 && !ctx.hasKey -} - -func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { - i := v.Interface() - - switch x := i.(type) { - case time.Time: - if x.Nanosecond() > 0 { - return x.AppendFormat(b, time.RFC3339Nano), nil - } - return x.AppendFormat(b, time.RFC3339), nil - case LocalTime: - return append(b, x.String()...), nil - case LocalDate: - return append(b, x.String()...), nil - case LocalDateTime: - return append(b, x.String()...), nil - } - - hasTextMarshaler := v.Type().Implements(textMarshalerType) - if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { - if !hasTextMarshaler { - v = v.Addr() - } - - if ctx.isRoot() { - return nil, fmt.Errorf("toml: type %s implementing the TextMarshaler interface cannot be a root element", v.Type()) - } - - text, err := v.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return nil, err - } - - b = enc.encodeString(b, string(text), ctx.options) - - return b, nil - } - - switch v.Kind() { - // containers - case reflect.Map: - return enc.encodeMap(b, ctx, v) - case reflect.Struct: - return enc.encodeStruct(b, ctx, v) - case reflect.Slice, reflect.Array: - return enc.encodeSlice(b, ctx, v) - case reflect.Interface: - if v.IsNil() { - return nil, fmt.Errorf("toml: encoding a nil interface is not supported") - } - - return enc.encode(b, ctx, v.Elem()) - case reflect.Ptr: - if v.IsNil() { - return enc.encode(b, ctx, reflect.Zero(v.Type().Elem())) - } - - return enc.encode(b, ctx, v.Elem()) - - // values - case reflect.String: - b = enc.encodeString(b, v.String(), ctx.options) - case reflect.Float32: - f := v.Float() - - if math.IsNaN(f) { - b = append(b, "nan"...) - } else if f > math.MaxFloat32 { - b = append(b, "inf"...) - } else if f < -math.MaxFloat32 { - b = append(b, "-inf"...) - } else if math.Trunc(f) == f { - b = strconv.AppendFloat(b, f, 'f', 1, 32) - } else { - b = strconv.AppendFloat(b, f, 'f', -1, 32) - } - case reflect.Float64: - f := v.Float() - if math.IsNaN(f) { - b = append(b, "nan"...) - } else if f > math.MaxFloat64 { - b = append(b, "inf"...) - } else if f < -math.MaxFloat64 { - b = append(b, "-inf"...) - } else if math.Trunc(f) == f { - b = strconv.AppendFloat(b, f, 'f', 1, 64) - } else { - b = strconv.AppendFloat(b, f, 'f', -1, 64) - } - case reflect.Bool: - if v.Bool() { - b = append(b, "true"...) - } else { - b = append(b, "false"...) 
- } - case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint: - x := v.Uint() - if x > uint64(math.MaxInt64) { - return nil, fmt.Errorf("toml: not encoding uint (%d) greater than max int64 (%d)", x, int64(math.MaxInt64)) - } - b = strconv.AppendUint(b, x, 10) - case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: - b = strconv.AppendInt(b, v.Int(), 10) - default: - return nil, fmt.Errorf("toml: cannot encode value of type %s", v.Kind()) - } - - return b, nil -} - -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Map: - return v.IsNil() - default: - return false - } -} - -func shouldOmitEmpty(options valueOptions, v reflect.Value) bool { - return options.omitempty && isEmptyValue(v) -} - -func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) { - var err error - - if !ctx.inline { - b = enc.encodeComment(ctx.indent, options.comment, b) - b = enc.commented(ctx.commented, b) - b = enc.indent(ctx.indent, b) - } - - b = enc.encodeKey(b, ctx.key) - b = append(b, " = "...) - - // create a copy of the context because the value of a KV shouldn't - // modify the global context. - subctx := ctx - subctx.insideKv = true - subctx.shiftKey() - subctx.options = options - - b, err = enc.encode(b, subctx, v) - if err != nil { - return nil, err - } - - return b, nil -} - -func (enc *Encoder) commented(commented bool, b []byte) []byte { - if commented { - return append(b, "# "...) - } - return b -} - -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Struct: - return isEmptyStruct(v) - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func isEmptyStruct(v reflect.Value) bool { - // TODO: merge with walkStruct and cache. - typ := v.Type() - for i := 0; i < typ.NumField(); i++ { - fieldType := typ.Field(i) - - // only consider exported fields - if fieldType.PkgPath != "" { - continue - } - - tag := fieldType.Tag.Get("toml") - - // special field name to skip field - if tag == "-" { - continue - } - - f := v.Field(i) - - if !isEmptyValue(f) { - return false - } - } - - return true -} - -const literalQuote = '\'' - -func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte { - if needsQuoting(v) { - return enc.encodeQuotedString(options.multiline, b, v) - } - - return enc.encodeLiteralString(b, v) -} - -func needsQuoting(v string) bool { - // TODO: vectorize - for _, b := range []byte(v) { - if b == '\'' || b == '\r' || b == '\n' || characters.InvalidAscii(b) { - return true - } - } - return false -} - -// caller should have checked that the string does not contain new lines or ' . -func (enc *Encoder) encodeLiteralString(b []byte, v string) []byte { - b = append(b, literalQuote) - b = append(b, v...) - b = append(b, literalQuote) - - return b -} - -func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte { - stringQuote := `"` - - if multiline { - stringQuote = `"""` - } - - b = append(b, stringQuote...) 
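// Editor's note: hypothetical illustration of the quoting rules implemented
// by encodeString/needsQuoting above: plain strings are emitted as literal
// strings, while strings containing a single quote, CR/LF, or invalid ASCII
// fall back to escaped basic strings.
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml/v2"
)

func main() {
	out, err := toml.Marshal(map[string]string{
		"plain":  "hello",
		"quoted": "it's",
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// plain = 'hello'
	// quoted = "it's"
}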
- if multiline { - b = append(b, '\n') - } - - const ( - hextable = "0123456789ABCDEF" - // U+0000 to U+0008, U+000A to U+001F, U+007F - nul = 0x0 - bs = 0x8 - lf = 0xa - us = 0x1f - del = 0x7f - ) - - for _, r := range []byte(v) { - switch r { - case '\\': - b = append(b, `\\`...) - case '"': - b = append(b, `\"`...) - case '\b': - b = append(b, `\b`...) - case '\f': - b = append(b, `\f`...) - case '\n': - if multiline { - b = append(b, r) - } else { - b = append(b, `\n`...) - } - case '\r': - b = append(b, `\r`...) - case '\t': - b = append(b, `\t`...) - default: - switch { - case r >= nul && r <= bs, r >= lf && r <= us, r == del: - b = append(b, `\u00`...) - b = append(b, hextable[r>>4]) - b = append(b, hextable[r&0x0f]) - default: - b = append(b, r) - } - } - } - - b = append(b, stringQuote...) - - return b -} - -// caller should have checked that the string is in A-Z / a-z / 0-9 / - / _ . -func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte { - return append(b, v...) -} - -func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) { - if len(ctx.parentKey) == 0 { - return b, nil - } - - b = enc.encodeComment(ctx.indent, ctx.options.comment, b) - - b = enc.commented(ctx.commented, b) - - b = enc.indent(ctx.indent, b) - - b = append(b, '[') - - b = enc.encodeKey(b, ctx.parentKey[0]) - - for _, k := range ctx.parentKey[1:] { - b = append(b, '.') - b = enc.encodeKey(b, k) - } - - b = append(b, "]\n"...) - - return b, nil -} - -//nolint:cyclop -func (enc *Encoder) encodeKey(b []byte, k string) []byte { - needsQuotation := false - cannotUseLiteral := false - - if len(k) == 0 { - return append(b, "''"...) - } - - for _, c := range k { - if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' { - continue - } - - if c == literalQuote { - cannotUseLiteral = true - } - - needsQuotation = true - } - - if needsQuotation && needsQuoting(k) { - cannotUseLiteral = true - } - - switch { - case cannotUseLiteral: - return enc.encodeQuotedString(false, b, k) - case needsQuotation: - return enc.encodeLiteralString(b, k) - default: - return enc.encodeUnquotedKey(b, k) - } -} - -func (enc *Encoder) keyToString(k reflect.Value) (string, error) { - keyType := k.Type() - switch { - case keyType.Kind() == reflect.String: - return k.String(), nil - - case keyType.Implements(textMarshalerType): - keyB, err := k.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) - } - return string(keyB), nil - } - return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) -} - -func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { - var ( - t table - emptyValueOptions valueOptions - ) - - iter := v.MapRange() - for iter.Next() { - v := iter.Value() - - if isNil(v) { - continue - } - - k, err := enc.keyToString(iter.Key()) - if err != nil { - return nil, err - } - - if willConvertToTableOrArrayTable(ctx, v) { - t.pushTable(k, v, emptyValueOptions) - } else { - t.pushKV(k, v, emptyValueOptions) - } - } - - sortEntriesByKey(t.kvs) - sortEntriesByKey(t.tables) - - return enc.encodeTable(b, ctx, t) -} - -func sortEntriesByKey(e []entry) { - sort.Slice(e, func(i, j int) bool { - return e[i].Key < e[j].Key - }) -} - -type entry struct { - Key string - Value reflect.Value - Options valueOptions -} - -type table struct { - kvs []entry - tables []entry -} - -func (t *table) pushKV(k string, v 
reflect.Value, options valueOptions) { - for _, e := range t.kvs { - if e.Key == k { - return - } - } - - t.kvs = append(t.kvs, entry{Key: k, Value: v, Options: options}) -} - -func (t *table) pushTable(k string, v reflect.Value, options valueOptions) { - for _, e := range t.tables { - if e.Key == k { - return - } - } - t.tables = append(t.tables, entry{Key: k, Value: v, Options: options}) -} - -func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { - // TODO: cache this - typ := v.Type() - for i := 0; i < typ.NumField(); i++ { - fieldType := typ.Field(i) - - // only consider exported fields - if fieldType.PkgPath != "" { - continue - } - - tag := fieldType.Tag.Get("toml") - - // special field name to skip field - if tag == "-" { - continue - } - - k, opts := parseTag(tag) - if !isValidName(k) { - k = "" - } - - f := v.Field(i) - - if k == "" { - if fieldType.Anonymous { - if fieldType.Type.Kind() == reflect.Struct { - walkStruct(ctx, t, f) - } - continue - } else { - k = fieldType.Name - } - } - - if isNil(f) { - continue - } - - options := valueOptions{ - multiline: opts.multiline, - omitempty: opts.omitempty, - commented: opts.commented, - comment: fieldType.Tag.Get("comment"), - } - - if opts.inline || !willConvertToTableOrArrayTable(ctx, f) { - t.pushKV(k, f, options) - } else { - t.pushTable(k, f, options) - } - } -} - -func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { - var t table - - walkStruct(ctx, &t, v) - - return enc.encodeTable(b, ctx, t) -} - -func (enc *Encoder) encodeComment(indent int, comment string, b []byte) []byte { - for len(comment) > 0 { - var line string - idx := strings.IndexByte(comment, '\n') - if idx >= 0 { - line = comment[:idx] - comment = comment[idx+1:] - } else { - line = comment - comment = "" - } - b = enc.indent(indent, b) - b = append(b, "# "...) - b = append(b, line...) - b = append(b, '\n') - } - return b -} - -func isValidName(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. 
- case !unicode.IsLetter(c) && !unicode.IsDigit(c): - return false - } - } - return true -} - -type tagOptions struct { - multiline bool - inline bool - omitempty bool - commented bool -} - -func parseTag(tag string) (string, tagOptions) { - opts := tagOptions{} - - idx := strings.Index(tag, ",") - if idx == -1 { - return tag, opts - } - - raw := tag[idx+1:] - tag = string(tag[:idx]) - for raw != "" { - var o string - i := strings.Index(raw, ",") - if i >= 0 { - o, raw = raw[:i], raw[i+1:] - } else { - o, raw = raw, "" - } - switch o { - case "multiline": - opts.multiline = true - case "inline": - opts.inline = true - case "omitempty": - opts.omitempty = true - case "commented": - opts.commented = true - } - } - - return tag, opts -} - -func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, error) { - var err error - - ctx.shiftKey() - - if ctx.insideKv || (ctx.inline && !ctx.isRoot()) { - return enc.encodeTableInline(b, ctx, t) - } - - if !ctx.skipTableHeader { - b, err = enc.encodeTableHeader(ctx, b) - if err != nil { - return nil, err - } - - if enc.indentTables && len(ctx.parentKey) > 0 { - ctx.indent++ - } - } - ctx.skipTableHeader = false - - hasNonEmptyKV := false - for _, kv := range t.kvs { - if shouldOmitEmpty(kv.Options, kv.Value) { - continue - } - hasNonEmptyKV = true - - ctx.setKey(kv.Key) - ctx2 := ctx - ctx2.commented = kv.Options.commented || ctx2.commented - - b, err = enc.encodeKv(b, ctx2, kv.Options, kv.Value) - if err != nil { - return nil, err - } - - b = append(b, '\n') - } - - first := true - for _, table := range t.tables { - if shouldOmitEmpty(table.Options, table.Value) { - continue - } - if first { - first = false - if hasNonEmptyKV { - b = append(b, '\n') - } - } else { - b = append(b, "\n"...) - } - - ctx.setKey(table.Key) - - ctx.options = table.Options - ctx2 := ctx - ctx2.commented = ctx2.commented || ctx.options.commented - - b, err = enc.encode(b, ctx2, table.Value) - if err != nil { - return nil, err - } - } - - return b, nil -} - -func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte, error) { - var err error - - b = append(b, '{') - - first := true - for _, kv := range t.kvs { - if shouldOmitEmpty(kv.Options, kv.Value) { - continue - } - - if first { - first = false - } else { - b = append(b, `, `...) - } - - ctx.setKey(kv.Key) - - b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) - if err != nil { - return nil, err - } - } - - if len(t.tables) > 0 { - panic("inline table cannot contain nested tables, only key-values") - } - - b = append(b, "}"...) - - return b, nil -} - -func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { - if !v.IsValid() { - return false - } - if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { - return false - } - - t := v.Type() - switch t.Kind() { - case reflect.Map, reflect.Struct: - return !ctx.inline - case reflect.Interface: - return willConvertToTable(ctx, v.Elem()) - case reflect.Ptr: - if v.IsNil() { - return false - } - - return willConvertToTable(ctx, v.Elem()) - default: - return false - } -} - -func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool { - if ctx.insideKv { - return false - } - t := v.Type() - - if t.Kind() == reflect.Interface { - return willConvertToTableOrArrayTable(ctx, v.Elem()) - } - - if t.Kind() == reflect.Slice || t.Kind() == reflect.Array { - if v.Len() == 0 { - // An empty slice should be a kv = []. 
- return false - } - - for i := 0; i < v.Len(); i++ { - t := willConvertToTable(ctx, v.Index(i)) - - if !t { - return false - } - } - - return true - } - - return willConvertToTable(ctx, v) -} - -func (enc *Encoder) encodeSlice(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { - if v.Len() == 0 { - b = append(b, "[]"...) - - return b, nil - } - - if willConvertToTableOrArrayTable(ctx, v) { - return enc.encodeSliceAsArrayTable(b, ctx, v) - } - - return enc.encodeSliceAsArray(b, ctx, v) -} - -// caller should have checked that v is a slice that only contains values that -// encode into tables. -func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { - ctx.shiftKey() - - scratch := make([]byte, 0, 64) - - scratch = enc.commented(ctx.commented, scratch) - - scratch = append(scratch, "[["...) - - for i, k := range ctx.parentKey { - if i > 0 { - scratch = append(scratch, '.') - } - - scratch = enc.encodeKey(scratch, k) - } - - scratch = append(scratch, "]]\n"...) - ctx.skipTableHeader = true - - b = enc.encodeComment(ctx.indent, ctx.options.comment, b) - - if enc.indentTables { - ctx.indent++ - } - - for i := 0; i < v.Len(); i++ { - if i != 0 { - b = append(b, "\n"...) - } - - b = append(b, scratch...) - - var err error - b, err = enc.encode(b, ctx, v.Index(i)) - if err != nil { - return nil, err - } - } - - return b, nil -} - -func (enc *Encoder) encodeSliceAsArray(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { - multiline := ctx.options.multiline || enc.arraysMultiline - separator := ", " - - b = append(b, '[') - - subCtx := ctx - subCtx.options = valueOptions{} - - if multiline { - separator = ",\n" - - b = append(b, '\n') - - subCtx.indent++ - } - - var err error - first := true - - for i := 0; i < v.Len(); i++ { - if first { - first = false - } else { - b = append(b, separator...) - } - - if multiline { - b = enc.indent(subCtx.indent, b) - } - - b, err = enc.encode(b, subCtx, v.Index(i)) - if err != nil { - return nil, err - } - } - - if multiline { - b = append(b, '\n') - b = enc.indent(ctx.indent, b) - } - - b = append(b, ']') - - return b, nil -} - -func (enc *Encoder) indent(level int, b []byte) []byte { - for i := 0; i < level; i++ { - b = append(b, enc.indentSymbol...) - } - - return b -} diff --git a/vendor/github.com/pelletier/go-toml/v2/strict.go b/vendor/github.com/pelletier/go-toml/v2/strict.go deleted file mode 100644 index 802e7e4d15..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/strict.go +++ /dev/null @@ -1,107 +0,0 @@ -package toml - -import ( - "github.com/pelletier/go-toml/v2/internal/danger" - "github.com/pelletier/go-toml/v2/internal/tracker" - "github.com/pelletier/go-toml/v2/unstable" -) - -type strict struct { - Enabled bool - - // Tracks the current key being processed. 
- key tracker.KeyTracker - - missing []unstable.ParserError -} - -func (s *strict) EnterTable(node *unstable.Node) { - if !s.Enabled { - return - } - - s.key.UpdateTable(node) -} - -func (s *strict) EnterArrayTable(node *unstable.Node) { - if !s.Enabled { - return - } - - s.key.UpdateArrayTable(node) -} - -func (s *strict) EnterKeyValue(node *unstable.Node) { - if !s.Enabled { - return - } - - s.key.Push(node) -} - -func (s *strict) ExitKeyValue(node *unstable.Node) { - if !s.Enabled { - return - } - - s.key.Pop(node) -} - -func (s *strict) MissingTable(node *unstable.Node) { - if !s.Enabled { - return - } - - s.missing = append(s.missing, unstable.ParserError{ - Highlight: keyLocation(node), - Message: "missing table", - Key: s.key.Key(), - }) -} - -func (s *strict) MissingField(node *unstable.Node) { - if !s.Enabled { - return - } - - s.missing = append(s.missing, unstable.ParserError{ - Highlight: keyLocation(node), - Message: "missing field", - Key: s.key.Key(), - }) -} - -func (s *strict) Error(doc []byte) error { - if !s.Enabled || len(s.missing) == 0 { - return nil - } - - err := &StrictMissingError{ - Errors: make([]DecodeError, 0, len(s.missing)), - } - - for _, derr := range s.missing { - derr := derr - err.Errors = append(err.Errors, *wrapDecodeError(doc, &derr)) - } - - return err -} - -func keyLocation(node *unstable.Node) []byte { - k := node.Key() - - hasOne := k.Next() - if !hasOne { - panic("should not be called with empty key") - } - - start := k.Node().Data - end := k.Node().Data - - for k.Next() { - end = k.Node().Data - } - - return danger.BytesRange(start, end) -} diff --git a/vendor/github.com/pelletier/go-toml/v2/toml.abnf b/vendor/github.com/pelletier/go-toml/v2/toml.abnf deleted file mode 100644 index 473f3749e8..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/toml.abnf +++ /dev/null @@ -1,243 +0,0 @@ -;; This document describes TOML's syntax, using the ABNF format (defined in -;; RFC 5234 -- https://www.ietf.org/rfc/rfc5234.txt). -;; -;; All valid TOML documents will match this description, however certain -;; invalid documents would need to be rejected as per the semantics described -;; in the supporting text description. - -;; It is possible to try this grammar interactively, using instaparse. -;; http://instaparse.mojombo.com/ -;; -;; To do so, in the lower right, click on Options and change `:input-format` to -;; ':abnf'. Then paste this entire ABNF document into the grammar entry box -;; (above the options). Then you can type or paste a sample TOML document into -;; the beige box on the left. Tada! - -;; Overall Structure - -toml = expression *( newline expression ) - -expression = ws [ comment ] -expression =/ ws keyval ws [ comment ] -expression =/ ws table ws [ comment ] - -;; Whitespace - -ws = *wschar -wschar = %x20 ; Space -wschar =/ %x09 ; Horizontal tab - -;; Newline - -newline = %x0A ; LF -newline =/ %x0D.0A ; CRLF - -;; Comment - -comment-start-symbol = %x23 ; # -non-ascii = %x80-D7FF / %xE000-10FFFF -non-eol = %x09 / %x20-7F / non-ascii - -comment = comment-start-symbol *non-eol - -;; Key-Value pairs - -keyval = key keyval-sep val - -key = simple-key / dotted-key -simple-key = quoted-key / unquoted-key - -unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ -quoted-key = basic-string / literal-string -dotted-key = simple-key 1*( dot-sep simple-key ) - -dot-sep = ws %x2E ws ; . 
Period -keyval-sep = ws %x3D ws ; = - -val = string / boolean / array / inline-table / date-time / float / integer - -;; String - -string = ml-basic-string / basic-string / ml-literal-string / literal-string - -;; Basic String - -basic-string = quotation-mark *basic-char quotation-mark - -quotation-mark = %x22 ; " - -basic-char = basic-unescaped / escaped -basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii -escaped = escape escape-seq-char - -escape = %x5C ; \ -escape-seq-char = %x22 ; " quotation mark U+0022 -escape-seq-char =/ %x5C ; \ reverse solidus U+005C -escape-seq-char =/ %x62 ; b backspace U+0008 -escape-seq-char =/ %x66 ; f form feed U+000C -escape-seq-char =/ %x6E ; n line feed U+000A -escape-seq-char =/ %x72 ; r carriage return U+000D -escape-seq-char =/ %x74 ; t tab U+0009 -escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX -escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX - -;; Multiline Basic String - -ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body - ml-basic-string-delim -ml-basic-string-delim = 3quotation-mark -ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] - -mlb-content = mlb-char / newline / mlb-escaped-nl -mlb-char = mlb-unescaped / escaped -mlb-quotes = 1*2quotation-mark -mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii -mlb-escaped-nl = escape ws newline *( wschar / newline ) - -;; Literal String - -literal-string = apostrophe *literal-char apostrophe - -apostrophe = %x27 ; ' apostrophe - -literal-char = %x09 / %x20-26 / %x28-7E / non-ascii - -;; Multiline Literal String - -ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body - ml-literal-string-delim -ml-literal-string-delim = 3apostrophe -ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] - -mll-content = mll-char / newline -mll-char = %x09 / %x20-26 / %x28-7E / non-ascii -mll-quotes = 1*2apostrophe - -;; Integer - -integer = dec-int / hex-int / oct-int / bin-int - -minus = %x2D ; - -plus = %x2B ; + -underscore = %x5F ; _ -digit1-9 = %x31-39 ; 1-9 -digit0-7 = %x30-37 ; 0-7 -digit0-1 = %x30-31 ; 0-1 - -hex-prefix = %x30.78 ; 0x -oct-prefix = %x30.6F ; 0o -bin-prefix = %x30.62 ; 0b - -dec-int = [ minus / plus ] unsigned-dec-int -unsigned-dec-int = DIGIT / digit1-9 1*( DIGIT / underscore DIGIT ) - -hex-int = hex-prefix HEXDIG *( HEXDIG / underscore HEXDIG ) -oct-int = oct-prefix digit0-7 *( digit0-7 / underscore digit0-7 ) -bin-int = bin-prefix digit0-1 *( digit0-1 / underscore digit0-1 ) - -;; Float - -float = float-int-part ( exp / frac [ exp ] ) -float =/ special-float - -float-int-part = dec-int -frac = decimal-point zero-prefixable-int -decimal-point = %x2E ; . -zero-prefixable-int = DIGIT *( DIGIT / underscore DIGIT ) - -exp = "e" float-exp-part -float-exp-part = [ minus / plus ] zero-prefixable-int - -special-float = [ minus / plus ] ( inf / nan ) -inf = %x69.6e.66 ; inf -nan = %x6e.61.6e ; nan - -;; Boolean - -boolean = true / false - -true = %x74.72.75.65 ; true -false = %x66.61.6C.73.65 ; false - -;; Date and Time (as defined in RFC 3339) - -date-time = offset-date-time / local-date-time / local-date / local-time - -date-fullyear = 4DIGIT -date-month = 2DIGIT ; 01-12 -date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year -time-delim = "T" / %x20 ; T, t, or space -time-hour = 2DIGIT ; 00-23 -time-minute = 2DIGIT ; 00-59 -time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second rules -time-secfrac = "." 
1*DIGIT -time-numoffset = ( "+" / "-" ) time-hour ":" time-minute -time-offset = "Z" / time-numoffset - -partial-time = time-hour ":" time-minute ":" time-second [ time-secfrac ] -full-date = date-fullyear "-" date-month "-" date-mday -full-time = partial-time time-offset - -;; Offset Date-Time - -offset-date-time = full-date time-delim full-time - -;; Local Date-Time - -local-date-time = full-date time-delim partial-time - -;; Local Date - -local-date = full-date - -;; Local Time - -local-time = partial-time - -;; Array - -array = array-open [ array-values ] ws-comment-newline array-close - -array-open = %x5B ; [ -array-close = %x5D ; ] - -array-values = ws-comment-newline val ws-comment-newline array-sep array-values -array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] - -array-sep = %x2C ; , Comma - -ws-comment-newline = *( wschar / [ comment ] newline ) - -;; Table - -table = std-table / array-table - -;; Standard Table - -std-table = std-table-open key std-table-close - -std-table-open = %x5B ws ; [ Left square bracket -std-table-close = ws %x5D ; ] Right square bracket - -;; Inline Table - -inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close - -inline-table-open = %x7B ws ; { -inline-table-close = ws %x7D ; } -inline-table-sep = ws %x2C ws ; , Comma - -inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] - -;; Array Table - -array-table = array-table-open key array-table-close - -array-table-open = %x5B.5B ws ; [[ Double left square bracket -array-table-close = ws %x5D.5D ; ]] Double right square bracket - -;; Built-in ABNF terms, reproduced here for clarity - -ALPHA = %x41-5A / %x61-7A ; A-Z / a-z -DIGIT = %x30-39 ; 0-9 -HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F" diff --git a/vendor/github.com/pelletier/go-toml/v2/types.go b/vendor/github.com/pelletier/go-toml/v2/types.go deleted file mode 100644 index 3c6b8fe570..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/types.go +++ /dev/null @@ -1,14 +0,0 @@ -package toml - -import ( - "encoding" - "reflect" - "time" -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() -var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() -var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() -var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil)) -var sliceInterfaceType = reflect.TypeOf([]interface{}(nil)) -var stringType = reflect.TypeOf("") diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go deleted file mode 100644 index 868c74c157..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ /dev/null @@ -1,1264 +0,0 @@ -package toml - -import ( - "encoding" - "errors" - "fmt" - "io" - "io/ioutil" - "math" - "reflect" - "strings" - "sync/atomic" - "time" - - "github.com/pelletier/go-toml/v2/internal/danger" - "github.com/pelletier/go-toml/v2/internal/tracker" - "github.com/pelletier/go-toml/v2/unstable" -) - -// Unmarshal deserializes a TOML document into a Go value. -// -// It is a shortcut for Decoder.Decode() with the default options. -func Unmarshal(data []byte, v interface{}) error { - p := unstable.Parser{} - p.Reset(data) - d := decoder{p: &p} - - return d.FromParser(v) -} - -// Decoder reads and decode a TOML document from an input stream. 
-type Decoder struct { - // input - r io.Reader - - // global settings - strict bool -} - -// NewDecoder creates a new Decoder that will read from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{r: r} -} - -// DisallowUnknownFields causes the Decoder to return an error when the -// destination is a struct and the input contains a key that does not match a -// non-ignored field. -// -// In that case, the Decoder returns a StrictMissingError that can be used to -// retrieve the individual errors as well as generate a human readable -// description of the missing fields. -func (d *Decoder) DisallowUnknownFields() *Decoder { - d.strict = true - return d -} - -// Decode the whole content of r into v. -// -// By default, values in the document that don't exist in the target Go value -// are ignored. See Decoder.DisallowUnknownFields() to change this behavior. -// -// When a TOML local date, time, or date-time is decoded into a time.Time, its -// value is represented in time.Local timezone. Otherwise the appropriate Local* -// structure is used. For time values, precision up to the nanosecond is -// supported by truncating extra digits. -// -// Empty tables decoded in an interface{} create an empty initialized -// map[string]interface{}. -// -// Types implementing the encoding.TextUnmarshaler interface are decoded from a -// TOML string. -// -// When decoding a number, go-toml will return an error if the number is out of -// bounds for the target type (which includes negative numbers when decoding -// into an unsigned int). -// -// If an error occurs while decoding the content of the document, this function -// returns a toml.DecodeError, providing context about the issue. When using -// strict mode and a field is missing, a `toml.StrictMissingError` is -// returned. In any other case, this function returns a standard Go error. -// -// # Type mapping -// -// List of supported TOML types and their associated accepted Go types: -// -// String -> string -// Integer -> uint*, int*, depending on size -// Float -> float*, depending on size -// Boolean -> bool -// Offset Date-Time -> time.Time -// Local Date-time -> LocalDateTime, time.Time -// Local Date -> LocalDate, time.Time -// Local Time -> LocalTime, time.Time -// Array -> slice and array, depending on elements types -// Table -> map and struct -// Inline Table -> same as Table -// Array of Tables -> same as Array and Table -func (d *Decoder) Decode(v interface{}) error { - b, err := ioutil.ReadAll(d.r) - if err != nil { - return fmt.Errorf("toml: %w", err) - } - - p := unstable.Parser{} - p.Reset(b) - dec := decoder{ - p: &p, - strict: strict{ - Enabled: d.strict, - }, - } - - return dec.FromParser(v) -} - -type decoder struct { - // Which parser instance in use for this decoding session. - p *unstable.Parser - - // Flag indicating that the current expression is stashed. - // If set to true, calling nextExpr will not actually pull a new expression - // but turn off the flag instead. - stashedExpr bool - - // Skip expressions until a table is found. This is set to true when a - // table could not be created (missing field in map), so all KV expressions - // need to be skipped. - skipUntilTable bool - - // Tracks position in Go arrays. - // This is used when decoding [[array tables]] into Go arrays. Given array - // tables are separate TOML expression, we need to keep track of where we - // are at in the Go array, as we can't just introspect its size. 
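// Editor's note: sketch of the strict-decoding flow documented above.
// With DisallowUnknownFields, Decode returns a *toml.StrictMissingError
// carrying one DecodeError per key that has no matching field. The TOML
// input and the Server struct are hypothetical.
package main

import (
	"errors"
	"fmt"
	"strings"

	toml "github.com/pelletier/go-toml/v2"
)

func main() {
	type Server struct {
		Host string `toml:"host"`
	}

	dec := toml.NewDecoder(strings.NewReader("host = 'a'\nport = 1\n"))
	dec.DisallowUnknownFields()

	var s Server
	err := dec.Decode(&s)

	var missing *toml.StrictMissingError
	if errors.As(err, &missing) {
		// One entry per field present in the document but absent from Server.
		fmt.Println(len(missing.Errors)) // 1 ("port" is unknown)
	}
}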
- arrayIndexes map[reflect.Value]int - - // Tracks keys that have been seen, with which type. - seen tracker.SeenTracker - - // Strict mode - strict strict - - // Current context for the error. - errorContext *errorContext -} - -type errorContext struct { - Struct reflect.Type - Field []int -} - -func (d *decoder) typeMismatchError(toml string, target reflect.Type) error { - return fmt.Errorf("toml: %s", d.typeMismatchString(toml, target)) -} - -func (d *decoder) typeMismatchString(toml string, target reflect.Type) string { - if d.errorContext != nil && d.errorContext.Struct != nil { - ctx := d.errorContext - f := ctx.Struct.FieldByIndex(ctx.Field) - return fmt.Sprintf("cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type) - } - return fmt.Sprintf("cannot decode TOML %s into a Go value of type %s", toml, target) -} - -func (d *decoder) expr() *unstable.Node { - return d.p.Expression() -} - -func (d *decoder) nextExpr() bool { - if d.stashedExpr { - d.stashedExpr = false - return true - } - return d.p.NextExpression() -} - -func (d *decoder) stashExpr() { - d.stashedExpr = true -} - -func (d *decoder) arrayIndex(shouldAppend bool, v reflect.Value) int { - if d.arrayIndexes == nil { - d.arrayIndexes = make(map[reflect.Value]int, 1) - } - - idx, ok := d.arrayIndexes[v] - - if !ok { - d.arrayIndexes[v] = 0 - } else if shouldAppend { - idx++ - d.arrayIndexes[v] = idx - } - - return idx -} - -func (d *decoder) FromParser(v interface{}) error { - r := reflect.ValueOf(v) - if r.Kind() != reflect.Ptr { - return fmt.Errorf("toml: decoding can only be performed into a pointer, not %s", r.Kind()) - } - - if r.IsNil() { - return fmt.Errorf("toml: decoding pointer target cannot be nil") - } - - r = r.Elem() - if r.Kind() == reflect.Interface && r.IsNil() { - newMap := map[string]interface{}{} - r.Set(reflect.ValueOf(newMap)) - } - - err := d.fromParser(r) - if err == nil { - return d.strict.Error(d.p.Data()) - } - - var e *unstable.ParserError - if errors.As(err, &e) { - return wrapDecodeError(d.p.Data(), e) - } - - return err -} - -func (d *decoder) fromParser(root reflect.Value) error { - for d.nextExpr() { - err := d.handleRootExpression(d.expr(), root) - if err != nil { - return err - } - } - - return d.p.Error() -} - -/* -Rules for the unmarshal code: - -- The stack is used to keep track of which values need to be set where. -- handle* functions <=> switch on a given unstable.Kind. -- unmarshalX* functions need to unmarshal a node of kind X. -- An "object" is either a struct or a map. 
-*/ - -func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error { - var x reflect.Value - var err error - - if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) { - err = d.seen.CheckExpression(expr) - if err != nil { - return err - } - } - - switch expr.Kind { - case unstable.KeyValue: - if d.skipUntilTable { - return nil - } - x, err = d.handleKeyValue(expr, v) - case unstable.Table: - d.skipUntilTable = false - d.strict.EnterTable(expr) - x, err = d.handleTable(expr.Key(), v) - case unstable.ArrayTable: - d.skipUntilTable = false - d.strict.EnterArrayTable(expr) - x, err = d.handleArrayTable(expr.Key(), v) - default: - panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind)) - } - - if d.skipUntilTable { - if expr.Kind == unstable.Table || expr.Kind == unstable.ArrayTable { - d.strict.MissingTable(expr) - } - } else if err == nil && x.IsValid() { - v.Set(x) - } - - return err -} - -func (d *decoder) handleArrayTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { - if key.Next() { - return d.handleArrayTablePart(key, v) - } - return d.handleKeyValues(v) -} - -func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { - switch v.Kind() { - case reflect.Interface: - elem := v.Elem() - if !elem.IsValid() { - elem = reflect.New(sliceInterfaceType).Elem() - elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) - } else if elem.Kind() == reflect.Slice { - if elem.Type() != sliceInterfaceType { - elem = reflect.New(sliceInterfaceType).Elem() - elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) - } else if !elem.CanSet() { - nelem := reflect.New(sliceInterfaceType).Elem() - nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap())) - reflect.Copy(nelem, elem) - elem = nelem - } - } - return d.handleArrayTableCollectionLast(key, elem) - case reflect.Ptr: - elem := v.Elem() - if !elem.IsValid() { - ptr := reflect.New(v.Type().Elem()) - v.Set(ptr) - elem = ptr.Elem() - } - - elem, err := d.handleArrayTableCollectionLast(key, elem) - if err != nil { - return reflect.Value{}, err - } - v.Elem().Set(elem) - - return v, nil - case reflect.Slice: - elemType := v.Type().Elem() - var elem reflect.Value - if elemType.Kind() == reflect.Interface { - elem = makeMapStringInterface() - } else { - elem = reflect.New(elemType).Elem() - } - elem2, err := d.handleArrayTable(key, elem) - if err != nil { - return reflect.Value{}, err - } - if elem2.IsValid() { - elem = elem2 - } - return reflect.Append(v, elem), nil - case reflect.Array: - idx := d.arrayIndex(true, v) - if idx >= v.Len() { - return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) - } - elem := v.Index(idx) - _, err := d.handleArrayTable(key, elem) - return v, err - default: - return reflect.Value{}, d.typeMismatchError("array table", v.Type()) - } -} - -// When parsing an array table expression, each part of the key needs to be -// evaluated like a normal key, but if it returns a collection, it also needs to -// point to the last element of the collection. Unless it is the last part of -// the key, then it needs to create a new element at the end. 
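// Editor's note: a user-level sketch of what the array-table machinery below
// supports: each [[servers]] expression appends one element to the target Go
// slice. The document and struct shape are hypothetical.
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml/v2"
)

func main() {
	doc := []byte("[[servers]]\nname = 'a'\n\n[[servers]]\nname = 'b'\n")

	var cfg struct {
		Servers []struct {
			Name string `toml:"name"`
		} `toml:"servers"`
	}
	if err := toml.Unmarshal(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(len(cfg.Servers), cfg.Servers[1].Name) // 2 b
}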
-func (d *decoder) handleArrayTableCollection(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { - if key.IsLast() { - return d.handleArrayTableCollectionLast(key, v) - } - - switch v.Kind() { - case reflect.Ptr: - elem := v.Elem() - if !elem.IsValid() { - ptr := reflect.New(v.Type().Elem()) - v.Set(ptr) - elem = ptr.Elem() - } - - elem, err := d.handleArrayTableCollection(key, elem) - if err != nil { - return reflect.Value{}, err - } - if elem.IsValid() { - v.Elem().Set(elem) - } - - return v, nil - case reflect.Slice: - elem := v.Index(v.Len() - 1) - x, err := d.handleArrayTable(key, elem) - if err != nil || d.skipUntilTable { - return reflect.Value{}, err - } - if x.IsValid() { - elem.Set(x) - } - - return v, err - case reflect.Array: - idx := d.arrayIndex(false, v) - if idx >= v.Len() { - return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) - } - elem := v.Index(idx) - _, err := d.handleArrayTable(key, elem) - return v, err - } - - return d.handleArrayTable(key, v) -} - -func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) { - var rv reflect.Value - - // First, dispatch over v to make sure it is a valid object. - // There is no guarantee over what it could be. - switch v.Kind() { - case reflect.Ptr: - elem := v.Elem() - if !elem.IsValid() { - v.Set(reflect.New(v.Type().Elem())) - } - elem = v.Elem() - return d.handleKeyPart(key, elem, nextFn, makeFn) - case reflect.Map: - vt := v.Type() - - // Create the key for the map element. Convert to key type. - mk, err := d.keyFromData(vt.Key(), key.Node().Data) - if err != nil { - return reflect.Value{}, err - } - - // If the map does not exist, create it. - if v.IsNil() { - vt := v.Type() - v = reflect.MakeMap(vt) - rv = v - } - - mv := v.MapIndex(mk) - set := false - if !mv.IsValid() { - // If there is no value in the map, create a new one according to - // the map type. If the element type is interface, create either a - // map[string]interface{} or a []interface{} depending on whether - // this is the last part of the array table key. 
- - t := vt.Elem() - if t.Kind() == reflect.Interface { - mv = makeFn() - } else { - mv = reflect.New(t).Elem() - } - set = true - } else if mv.Kind() == reflect.Interface { - mv = mv.Elem() - if !mv.IsValid() { - mv = makeFn() - } - set = true - } else if !mv.CanAddr() { - vt := v.Type() - t := vt.Elem() - oldmv := mv - mv = reflect.New(t).Elem() - mv.Set(oldmv) - set = true - } - - x, err := nextFn(key, mv) - if err != nil { - return reflect.Value{}, err - } - - if x.IsValid() { - mv = x - set = true - } - - if set { - v.SetMapIndex(mk, mv) - } - case reflect.Struct: - path, found := structFieldPath(v, string(key.Node().Data)) - if !found { - d.skipUntilTable = true - return reflect.Value{}, nil - } - - if d.errorContext == nil { - d.errorContext = new(errorContext) - } - t := v.Type() - d.errorContext.Struct = t - d.errorContext.Field = path - - f := fieldByIndex(v, path) - x, err := nextFn(key, f) - if err != nil || d.skipUntilTable { - return reflect.Value{}, err - } - if x.IsValid() { - f.Set(x) - } - d.errorContext.Field = nil - d.errorContext.Struct = nil - case reflect.Interface: - if v.Elem().IsValid() { - v = v.Elem() - } else { - v = makeMapStringInterface() - } - - x, err := d.handleKeyPart(key, v, nextFn, makeFn) - if err != nil { - return reflect.Value{}, err - } - if x.IsValid() { - v = x - } - rv = v - default: - panic(fmt.Errorf("unhandled part: %s", v.Kind())) - } - - return rv, nil -} - -// HandleArrayTablePart navigates the Go structure v using the key v. It is -// only used for the prefix (non-last) parts of an array-table. When -// encountering a collection, it should go to the last element. -func (d *decoder) handleArrayTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { - var makeFn valueMakerFn - if key.IsLast() { - makeFn = makeSliceInterface - } else { - makeFn = makeMapStringInterface - } - return d.handleKeyPart(key, v, d.handleArrayTableCollection, makeFn) -} - -// HandleTable returns a reference when it has checked the next expression but -// cannot handle it. -func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { - if v.Kind() == reflect.Slice { - if v.Len() == 0 { - return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice") - } - elem := v.Index(v.Len() - 1) - x, err := d.handleTable(key, elem) - if err != nil { - return reflect.Value{}, err - } - if x.IsValid() { - elem.Set(x) - } - return reflect.Value{}, nil - } - if key.Next() { - // Still scoping the key - return d.handleTablePart(key, v) - } - // Done scoping the key. - // Now handle all the key-value expressions in this table. - return d.handleKeyValues(v) -} - -// Handle root expressions until the end of the document or the next -// non-key-value. -func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { - var rv reflect.Value - for d.nextExpr() { - expr := d.expr() - if expr.Kind != unstable.KeyValue { - // Stash the expression so that fromParser can just loop and use - // the right handler. - // We could just recurse ourselves here, but at least this gives a - // chance to pop the stack a bit. 
- d.stashExpr() - break - } - - err := d.seen.CheckExpression(expr) - if err != nil { - return reflect.Value{}, err - } - - x, err := d.handleKeyValue(expr, v) - if err != nil { - return reflect.Value{}, err - } - if x.IsValid() { - v = x - rv = x - } - } - return rv, nil -} - -type ( - handlerFn func(key unstable.Iterator, v reflect.Value) (reflect.Value, error) - valueMakerFn func() reflect.Value -) - -func makeMapStringInterface() reflect.Value { - return reflect.MakeMap(mapStringInterfaceType) -} - -func makeSliceInterface() reflect.Value { - return reflect.MakeSlice(sliceInterfaceType, 0, 16) -} - -func (d *decoder) handleTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { - return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface) -} - -func (d *decoder) tryTextUnmarshaler(node *unstable.Node, v reflect.Value) (bool, error) { - // Special case for time, because we allow to unmarshal to it from - // different kind of AST nodes. - if v.Type() == timeType { - return false, nil - } - - if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) { - err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data) - if err != nil { - return false, unstable.NewParserError(d.p.Raw(node.Raw), "%w", err) - } - - return true, nil - } - - return false, nil -} - -func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error { - for v.Kind() == reflect.Ptr { - v = initAndDereferencePointer(v) - } - - ok, err := d.tryTextUnmarshaler(value, v) - if ok || err != nil { - return err - } - - switch value.Kind { - case unstable.String: - return d.unmarshalString(value, v) - case unstable.Integer: - return d.unmarshalInteger(value, v) - case unstable.Float: - return d.unmarshalFloat(value, v) - case unstable.Bool: - return d.unmarshalBool(value, v) - case unstable.DateTime: - return d.unmarshalDateTime(value, v) - case unstable.LocalDate: - return d.unmarshalLocalDate(value, v) - case unstable.LocalTime: - return d.unmarshalLocalTime(value, v) - case unstable.LocalDateTime: - return d.unmarshalLocalDateTime(value, v) - case unstable.InlineTable: - return d.unmarshalInlineTable(value, v) - case unstable.Array: - return d.unmarshalArray(value, v) - default: - panic(fmt.Errorf("handleValue not implemented for %s", value.Kind)) - } -} - -func (d *decoder) unmarshalArray(array *unstable.Node, v reflect.Value) error { - switch v.Kind() { - case reflect.Slice: - if v.IsNil() { - v.Set(reflect.MakeSlice(v.Type(), 0, 16)) - } else { - v.SetLen(0) - } - case reflect.Array: - // arrays are always initialized - case reflect.Interface: - elem := v.Elem() - if !elem.IsValid() { - elem = reflect.New(sliceInterfaceType).Elem() - elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) - } else if elem.Kind() == reflect.Slice { - if elem.Type() != sliceInterfaceType { - elem = reflect.New(sliceInterfaceType).Elem() - elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) - } else if !elem.CanSet() { - nelem := reflect.New(sliceInterfaceType).Elem() - nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap())) - reflect.Copy(nelem, elem) - elem = nelem - } - } - err := d.unmarshalArray(array, elem) - if err != nil { - return err - } - v.Set(elem) - return nil - default: - // TODO: use newDecodeError, but first the parser needs to fill - // array.Data. 
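// Editor's note: sketch of the slice-versus-array behavior in unmarshalArray
// below, as read from that code: a slice is reset and appended to, while a
// fixed-size Go array silently drops elements past its length. Input and
// struct shapes are hypothetical.
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml/v2"
)

func main() {
	doc := []byte("nums = [1, 2, 3]")

	var s struct{ Nums []int }
	if err := toml.Unmarshal(doc, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Nums) // [1 2 3]

	var a struct{ Nums [2]int }
	if err := toml.Unmarshal(doc, &a); err != nil {
		panic(err)
	}
	fmt.Println(a.Nums) // [1 2]: the third element is ignored
}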
- return d.typeMismatchError("array", v.Type()) - } - - elemType := v.Type().Elem() - - it := array.Children() - idx := 0 - for it.Next() { - n := it.Node() - - // TODO: optimize - if v.Kind() == reflect.Slice { - elem := reflect.New(elemType).Elem() - - err := d.handleValue(n, elem) - if err != nil { - return err - } - - v.Set(reflect.Append(v, elem)) - } else { // array - if idx >= v.Len() { - return nil - } - elem := v.Index(idx) - err := d.handleValue(n, elem) - if err != nil { - return err - } - idx++ - } - } - - return nil -} - -func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) error { - // Make sure v is an initialized object. - switch v.Kind() { - case reflect.Map: - if v.IsNil() { - v.Set(reflect.MakeMap(v.Type())) - } - case reflect.Struct: - // structs are always initialized. - case reflect.Interface: - elem := v.Elem() - if !elem.IsValid() { - elem = makeMapStringInterface() - v.Set(elem) - } - return d.unmarshalInlineTable(itable, elem) - default: - return unstable.NewParserError(d.p.Raw(itable.Raw), "cannot store inline table in Go type %s", v.Kind()) - } - - it := itable.Children() - for it.Next() { - n := it.Node() - - x, err := d.handleKeyValue(n, v) - if err != nil { - return err - } - if x.IsValid() { - v = x - } - } - - return nil -} - -func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error { - dt, err := parseDateTime(value.Data) - if err != nil { - return err - } - - v.Set(reflect.ValueOf(dt)) - return nil -} - -func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) error { - ld, err := parseLocalDate(value.Data) - if err != nil { - return err - } - - if v.Type() == timeType { - cast := ld.AsTime(time.Local) - v.Set(reflect.ValueOf(cast)) - return nil - } - - v.Set(reflect.ValueOf(ld)) - - return nil -} - -func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) error { - lt, rest, err := parseLocalTime(value.Data) - if err != nil { - return err - } - - if len(rest) > 0 { - return unstable.NewParserError(rest, "extra characters at the end of a local time") - } - - v.Set(reflect.ValueOf(lt)) - return nil -} - -func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) error { - ldt, rest, err := parseLocalDateTime(value.Data) - if err != nil { - return err - } - - if len(rest) > 0 { - return unstable.NewParserError(rest, "extra characters at the end of a local date time") - } - - if v.Type() == timeType { - cast := ldt.AsTime(time.Local) - - v.Set(reflect.ValueOf(cast)) - return nil - } - - v.Set(reflect.ValueOf(ldt)) - - return nil -} - -func (d *decoder) unmarshalBool(value *unstable.Node, v reflect.Value) error { - b := value.Data[0] == 't' - - switch v.Kind() { - case reflect.Bool: - v.SetBool(b) - case reflect.Interface: - v.Set(reflect.ValueOf(b)) - default: - return unstable.NewParserError(value.Data, "cannot assign boolean to a %t", b) - } - - return nil -} - -func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error { - f, err := parseFloat(value.Data) - if err != nil { - return err - } - - switch v.Kind() { - case reflect.Float64: - v.SetFloat(f) - case reflect.Float32: - if f > math.MaxFloat32 { - return unstable.NewParserError(value.Data, "number %f does not fit in a float32", f) - } - v.SetFloat(f) - case reflect.Interface: - v.Set(reflect.ValueOf(f)) - default: - return unstable.NewParserError(value.Data, "float cannot be assigned to %s", v.Kind()) - } - - return nil -} - -const ( - maxInt = int64(^uint(0) >> 1) - minInt 
= -maxInt - 1 -) - -// Maximum value of uint for decoding. Currently the decoder parses the integer -// into an int64. As a result, on architectures where uint is 64 bits, the -// effective maximum uint we can decode is the maximum of int64. On -// architectures where uint is 32 bits, the maximum value we can decode is -// lower: the maximum of uint32. I didn't find a way to figure out this value at -// compile time, so it is computed during initialization. -var maxUint int64 = math.MaxInt64 - -func init() { - m := uint64(^uint(0)) - if m < uint64(maxUint) { - maxUint = int64(m) - } -} - -func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error { - kind := v.Kind() - if kind == reflect.Float32 || kind == reflect.Float64 { - return d.unmarshalFloat(value, v) - } - - i, err := parseInteger(value.Data) - if err != nil { - return err - } - - var r reflect.Value - - switch kind { - case reflect.Int64: - v.SetInt(i) - return nil - case reflect.Int32: - if i < math.MinInt32 || i > math.MaxInt32 { - return fmt.Errorf("toml: number %d does not fit in an int32", i) - } - - r = reflect.ValueOf(int32(i)) - case reflect.Int16: - if i < math.MinInt16 || i > math.MaxInt16 { - return fmt.Errorf("toml: number %d does not fit in an int16", i) - } - - r = reflect.ValueOf(int16(i)) - case reflect.Int8: - if i < math.MinInt8 || i > math.MaxInt8 { - return fmt.Errorf("toml: number %d does not fit in an int8", i) - } - - r = reflect.ValueOf(int8(i)) - case reflect.Int: - if i < minInt || i > maxInt { - return fmt.Errorf("toml: number %d does not fit in an int", i) - } - - r = reflect.ValueOf(int(i)) - case reflect.Uint64: - if i < 0 { - return fmt.Errorf("toml: negative number %d does not fit in an uint64", i) - } - - r = reflect.ValueOf(uint64(i)) - case reflect.Uint32: - if i < 0 || i > math.MaxUint32 { - return fmt.Errorf("toml: negative number %d does not fit in an uint32", i) - } - - r = reflect.ValueOf(uint32(i)) - case reflect.Uint16: - if i < 0 || i > math.MaxUint16 { - return fmt.Errorf("toml: negative number %d does not fit in an uint16", i) - } - - r = reflect.ValueOf(uint16(i)) - case reflect.Uint8: - if i < 0 || i > math.MaxUint8 { - return fmt.Errorf("toml: negative number %d does not fit in an uint8", i) - } - - r = reflect.ValueOf(uint8(i)) - case reflect.Uint: - if i < 0 || i > maxUint { - return fmt.Errorf("toml: negative number %d does not fit in an uint", i) - } - - r = reflect.ValueOf(uint(i)) - case reflect.Interface: - r = reflect.ValueOf(i) - default: - return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("integer", v.Type())) - } - - if !r.Type().AssignableTo(v.Type()) { - r = r.Convert(v.Type()) - } - - v.Set(r) - - return nil -} - -func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error { - switch v.Kind() { - case reflect.String: - v.SetString(string(value.Data)) - case reflect.Interface: - v.Set(reflect.ValueOf(string(value.Data))) - default: - return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("string", v.Type())) - } - - return nil -} - -func (d *decoder) handleKeyValue(expr *unstable.Node, v reflect.Value) (reflect.Value, error) { - d.strict.EnterKeyValue(expr) - - v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v) - if d.skipUntilTable { - d.strict.MissingField(expr) - d.skipUntilTable = false - } - - d.strict.ExitKeyValue(expr) - - return v, err -} - -func (d *decoder) handleKeyValueInner(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { - if 
key.Next() { - // Still scoping the key - return d.handleKeyValuePart(key, value, v) - } - // Done scoping the key. - // v is whatever Go value we need to fill. - return reflect.Value{}, d.handleValue(value, v) -} - -func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, error) { - switch { - case stringType.AssignableTo(keyType): - return reflect.ValueOf(string(data)), nil - - case stringType.ConvertibleTo(keyType): - return reflect.ValueOf(string(data)).Convert(keyType), nil - - case keyType.Implements(textUnmarshalerType): - mk := reflect.New(keyType.Elem()) - if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { - return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", keyType, err) - } - return mk, nil - - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): - mk := reflect.New(keyType) - if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { - return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", keyType, err) - } - return mk.Elem(), nil - } - return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) -} - -func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { - // contains the replacement for v - var rv reflect.Value - - // First, dispatch over v to make sure it is a valid object. - // There is no guarantee over what it could be. - switch v.Kind() { - case reflect.Map: - vt := v.Type() - - mk, err := d.keyFromData(vt.Key(), key.Node().Data) - if err != nil { - return reflect.Value{}, err - } - - // If the map does not exist, create it. - if v.IsNil() { - v = reflect.MakeMap(vt) - rv = v - } - - mv := v.MapIndex(mk) - set := false - if !mv.IsValid() || key.IsLast() { - set = true - mv = reflect.New(v.Type().Elem()).Elem() - } - - nv, err := d.handleKeyValueInner(key, value, mv) - if err != nil { - return reflect.Value{}, err - } - if nv.IsValid() { - mv = nv - set = true - } - - if set { - v.SetMapIndex(mk, mv) - } - case reflect.Struct: - path, found := structFieldPath(v, string(key.Node().Data)) - if !found { - d.skipUntilTable = true - break - } - - if d.errorContext == nil { - d.errorContext = new(errorContext) - } - t := v.Type() - d.errorContext.Struct = t - d.errorContext.Field = path - - f := fieldByIndex(v, path) - - if !f.CanSet() { - // If the field is not settable, we need to take a slower path and make a copy of - // the struct itself to a new location. - nvp := reflect.New(v.Type()) - nvp.Elem().Set(v) - v = nvp.Elem() - _, err := d.handleKeyValuePart(key, value, v) - if err != nil { - return reflect.Value{}, err - } - return nvp.Elem(), nil - } - x, err := d.handleKeyValueInner(key, value, f) - if err != nil { - return reflect.Value{}, err - } - - if x.IsValid() { - f.Set(x) - } - d.errorContext.Struct = nil - d.errorContext.Field = nil - case reflect.Interface: - v = v.Elem() - - // Following encoding/json: when decoding an object into an - // interface{}, it always needs to hold a - // map[string]interface{}. This keeps the types - // consistent whether or not a previous value was set.
- if !v.IsValid() || v.Type() != mapStringInterfaceType { - v = makeMapStringInterface() - } - - x, err := d.handleKeyValuePart(key, value, v) - if err != nil { - return reflect.Value{}, err - } - if x.IsValid() { - v = x - } - rv = v - case reflect.Ptr: - elem := v.Elem() - if !elem.IsValid() { - ptr := reflect.New(v.Type().Elem()) - v.Set(ptr) - rv = v - elem = ptr.Elem() - } - - elem2, err := d.handleKeyValuePart(key, value, elem) - if err != nil { - return reflect.Value{}, err - } - if elem2.IsValid() { - elem = elem2 - } - v.Elem().Set(elem) - default: - return reflect.Value{}, fmt.Errorf("unhandled kv part: %s", v.Kind()) - } - - return rv, nil -} - -func initAndDereferencePointer(v reflect.Value) reflect.Value { - var elem reflect.Value - if v.IsNil() { - ptr := reflect.New(v.Type().Elem()) - v.Set(ptr) - } - elem = v.Elem() - return elem -} - -// Same as reflect.Value.FieldByIndex, but creates pointers if needed. -func fieldByIndex(v reflect.Value, path []int) reflect.Value { - for _, x := range path { - v = v.Field(x) - - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - } - return v -} - -type fieldPathsMap = map[string][]int - -var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap - -func structFieldPath(v reflect.Value, name string) ([]int, bool) { - t := v.Type() - - cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap) - fieldPaths, ok := cache[danger.MakeTypeID(t)] - - if !ok { - fieldPaths = map[string][]int{} - - forEachField(t, nil, func(name string, path []int) { - fieldPaths[name] = path - // extra copy for the case-insensitive match - fieldPaths[strings.ToLower(name)] = path - }) - - newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1) - newCache[danger.MakeTypeID(t)] = fieldPaths - for k, v := range cache { - newCache[k] = v - } - globalFieldPathsCache.Store(newCache) - } - - path, ok := fieldPaths[name] - if !ok { - path, ok = fieldPaths[strings.ToLower(name)] - } - return path, ok -} - -func forEachField(t reflect.Type, path []int, do func(name string, path []int)) { - n := t.NumField() - for i := 0; i < n; i++ { - f := t.Field(i) - - if !f.Anonymous && f.PkgPath != "" { - // only consider exported fields. - continue - } - - fieldPath := append(path, i) - fieldPath = fieldPath[:len(fieldPath):len(fieldPath)] - - name := f.Tag.Get("toml") - if name == "-" { - continue - } - - if i := strings.IndexByte(name, ','); i >= 0 { - name = name[:i] - } - - if f.Anonymous && name == "" { - t2 := f.Type - if t2.Kind() == reflect.Ptr { - t2 = t2.Elem() - } - - if t2.Kind() == reflect.Struct { - forEachField(t2, fieldPath, do) - } - continue - } - - if name == "" { - name = f.Name - } - - do(name, fieldPath) - } -} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go deleted file mode 100644 index f526bf2c09..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go +++ /dev/null @@ -1,136 +0,0 @@ -package unstable - -import ( - "fmt" - "unsafe" - - "github.com/pelletier/go-toml/v2/internal/danger" -) - -// Iterator over a sequence of nodes. -// -// Starts uninitialized, you need to call Next() first. 
-// -// For example: -// -// it := n.Children() -// for it.Next() { -// n := it.Node() -// // do something with n -// } -type Iterator struct { - started bool - node *Node -} - -// Next moves the iterator forward and returns true if it points to a -// node, false otherwise. -func (c *Iterator) Next() bool { - if !c.started { - c.started = true - } else if c.node.Valid() { - c.node = c.node.Next() - } - return c.node.Valid() -} - -// IsLast returns true if the current node of the iterator is the last -// one. Subsequent calls to Next() will return false. -func (c *Iterator) IsLast() bool { - return c.node.next == 0 -} - -// Node returns a pointer to the node pointed at by the iterator. -func (c *Iterator) Node() *Node { - return c.node -} - -// Node in a TOML expression AST. -// -// Depending on Kind, its sequence of children should be interpreted -// differently. -// -// - Array has one child per element in the array. -// - InlineTable has one child per key-value in the table (each of kind -// KeyValue). -// - KeyValue has at least two children. The first one is the value. The rest -// make a potentially dotted key. -// - Table and ArrayTable's children represent a dotted key (same as -// KeyValue, but without the first node being the value). -// -// When relevant, Raw describes the range of bytes this node is referring to in -// the input document. Use Parser.Raw() to retrieve the actual bytes. -type Node struct { - Kind Kind - Raw Range // Raw bytes from the input. - Data []byte // Node value (either allocated or referencing the input). - - // References to other nodes, as offsets in the backing array - // from this node. References can go backward, so those can be - // negative. - next int // 0 if last element - child int // 0 if no child -} - -// Range of bytes in the document. -type Range struct { - Offset uint32 - Length uint32 -} - -// Next returns a pointer to the next node, or nil if there is no next node. -func (n *Node) Next() *Node { - if n.next == 0 { - return nil - } - ptr := unsafe.Pointer(n) - size := unsafe.Sizeof(Node{}) - return (*Node)(danger.Stride(ptr, size, n.next)) -} - -// Child returns a pointer to the first child node of this node. Other children -// can be accessed calling Next on the first child. Returns nil if this Node -// has no child. -func (n *Node) Child() *Node { - if n.child == 0 { - return nil - } - ptr := unsafe.Pointer(n) - size := unsafe.Sizeof(Node{}) - return (*Node)(danger.Stride(ptr, size, n.child)) -} - -// Valid returns true if the node is not nil. -func (n *Node) Valid() bool { - return n != nil -} - -// Key returns the children nodes making the Key on a supported node. Panics -// otherwise. They are guaranteed to all be of Kind Key. A simple key -// would return just one element. -func (n *Node) Key() Iterator { - switch n.Kind { - case KeyValue: - value := n.Child() - if !value.Valid() { - panic(fmt.Errorf("KeyValue should have at least two children")) - } - return Iterator{node: value.Next()} - case Table, ArrayTable: - return Iterator{node: n.Child()} - default: - panic(fmt.Errorf("Key() is not supported on a %s", n.Kind)) - } -} - -// Value returns a pointer to the value node of a KeyValue. -// Guaranteed to be non-nil. Panics if not called on a KeyValue node, -// or if the children are malformed. -func (n *Node) Value() *Node { - return n.Child() -} - -// Children returns an iterator over a node's children.
-func (n *Node) Children() Iterator { - return Iterator{node: n.Child()} -} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go b/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go deleted file mode 100644 index 9538e30df9..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go +++ /dev/null @@ -1,71 +0,0 @@ -package unstable - -// root contains a full AST. -// -// It is immutable once constructed with Builder. -type root struct { - nodes []Node -} - -// Iterator over the top-level nodes. -func (r *root) Iterator() Iterator { - it := Iterator{} - if len(r.nodes) > 0 { - it.node = &r.nodes[0] - } - return it -} - -func (r *root) at(idx reference) *Node { - return &r.nodes[idx] -} - -type reference int - -const invalidReference reference = -1 - -func (r reference) Valid() bool { - return r != invalidReference -} - -type builder struct { - tree root - lastIdx int -} - -func (b *builder) Tree() *root { - return &b.tree -} - -func (b *builder) NodeAt(ref reference) *Node { - return b.tree.at(ref) -} - -func (b *builder) Reset() { - b.tree.nodes = b.tree.nodes[:0] - b.lastIdx = 0 -} - -func (b *builder) Push(n Node) reference { - b.lastIdx = len(b.tree.nodes) - b.tree.nodes = append(b.tree.nodes, n) - return reference(b.lastIdx) -} - -func (b *builder) PushAndChain(n Node) reference { - newIdx := len(b.tree.nodes) - b.tree.nodes = append(b.tree.nodes, n) - if b.lastIdx >= 0 { - b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx - } - b.lastIdx = newIdx - return reference(b.lastIdx) -} - -func (b *builder) AttachChild(parent reference, child reference) { - b.tree.nodes[parent].child = int(child) - int(parent) -} - -func (b *builder) Chain(from reference, to reference) { - b.tree.nodes[from].next = int(to) - int(from) -} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go b/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go deleted file mode 100644 index 7ff26c53c7..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package unstable provides APIs that do not meet the backward compatibility -// guarantees yet. -package unstable diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go b/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go deleted file mode 100644 index ff9df1bef8..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go +++ /dev/null @@ -1,71 +0,0 @@ -package unstable - -import "fmt" - -// Kind represents the type of TOML structure contained in a given Node. -type Kind int - -const ( - // Meta - Invalid Kind = iota - Comment - Key - - // Top-level structures - Table - ArrayTable - KeyValue - - // Container values - Array - InlineTable - - // Values - String - Bool - Float - Integer - LocalDate - LocalTime - LocalDateTime - DateTime -) - -// String implementation of fmt.Stringer.
-func (k Kind) String() string { - switch k { - case Invalid: - return "Invalid" - case Comment: - return "Comment" - case Key: - return "Key" - case Table: - return "Table" - case ArrayTable: - return "ArrayTable" - case KeyValue: - return "KeyValue" - case Array: - return "Array" - case InlineTable: - return "InlineTable" - case String: - return "String" - case Bool: - return "Bool" - case Float: - return "Float" - case Integer: - return "Integer" - case LocalDate: - return "LocalDate" - case LocalTime: - return "LocalTime" - case LocalDateTime: - return "LocalDateTime" - case DateTime: - return "DateTime" - } - panic(fmt.Errorf("Kind.String() not implemented for '%d'", k)) -} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go deleted file mode 100644 index 50358a44ff..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go +++ /dev/null @@ -1,1245 +0,0 @@ -package unstable - -import ( - "bytes" - "fmt" - "unicode" - - "github.com/pelletier/go-toml/v2/internal/characters" - "github.com/pelletier/go-toml/v2/internal/danger" -) - -// ParserError describes an error relative to the content of the document. -// -// It cannot outlive the instance of Parser it refers to, and may cause panics -// if the parser is reset. -type ParserError struct { - Highlight []byte - Message string - Key []string // optional -} - -// Error is the implementation of the error interface. -func (e *ParserError) Error() string { - return e.Message -} - -// NewParserError is a convenience function to create a ParserError. -// -// Warning: Highlight needs to be a subslice of Parser.data, so only slices -// returned by Parser.Raw are valid candidates. -func NewParserError(highlight []byte, format string, args ...interface{}) error { - return &ParserError{ - Highlight: highlight, - Message: fmt.Errorf(format, args...).Error(), - } -} - -// Parser scans over a TOML-encoded document and generates an iterative AST. -// -// To prime the Parser, first reset it with the contents of a TOML document. -// Then, process all top-level expressions sequentially. See Example. -// -// Don't forget to check Error() after you're done parsing. -// -// Each top-level expression needs to be fully processed before calling -// NextExpression() again. Otherwise, calls to various Node methods may panic if -// the parser has moved on to the next expression. -// -// For performance reasons, go-toml doesn't make a copy of the input bytes to -// the parser. Make sure to copy all the bytes you need to outlive the slice -// given to the parser. -type Parser struct { - data []byte - builder builder - ref reference - left []byte - err error - first bool - - KeepComments bool -} - -// Data returns the slice provided to the last call to Reset. -func (p *Parser) Data() []byte { - return p.data -} - -// Range returns a range description that corresponds to a given slice of the -// input. If the argument is not a subslice of the parser input, this function -// panics. -func (p *Parser) Range(b []byte) Range { - return Range{ - Offset: uint32(danger.SubsliceOffset(p.data, b)), - Length: uint32(len(b)), - } -} - -// Raw returns the slice corresponding to the bytes in the given range. -func (p *Parser) Raw(raw Range) []byte { - return p.data[raw.Offset : raw.Offset+raw.Length] -} - -// Reset brings the parser to its initial state for a given input. It wipes and -reuses internal storage to reduce allocation.
-func (p *Parser) Reset(b []byte) { - p.builder.Reset() - p.ref = invalidReference - p.data = b - p.left = b - p.err = nil - p.first = true -} - -// NextExpression parses the next top-level expression. If an expression was -// successfully parsed, it returns true. If the parser is at the end of the -// document or an error occurred, it returns false. -// -// Retrieve the parsed expression with Expression(). -func (p *Parser) NextExpression() bool { - if len(p.left) == 0 || p.err != nil { - return false - } - - p.builder.Reset() - p.ref = invalidReference - - for { - if len(p.left) == 0 || p.err != nil { - return false - } - - if !p.first { - p.left, p.err = p.parseNewline(p.left) - } - - if len(p.left) == 0 || p.err != nil { - return false - } - - p.ref, p.left, p.err = p.parseExpression(p.left) - - if p.err != nil { - return false - } - - p.first = false - - if p.ref.Valid() { - return true - } - } -} - -// Expression returns a pointer to the node representing the last successfully -// parsed expression. -func (p *Parser) Expression() *Node { - return p.builder.NodeAt(p.ref) -} - -// Error returns any error that has occurred during parsing. -func (p *Parser) Error() error { - return p.err -} - -// Position describes a position in the input. -type Position struct { - // Number of bytes from the beginning of the input. - Offset int - // Line number, starting at 1. - Line int - // Column number, starting at 1. - Column int -} - -// Shape describes the position of a range in the input. -type Shape struct { - Start Position - End Position -} - -func (p *Parser) position(b []byte) Position { - offset := danger.SubsliceOffset(p.data, b) - - lead := p.data[:offset] - - return Position{ - Offset: offset, - Line: bytes.Count(lead, []byte{'\n'}) + 1, - Column: len(lead) - bytes.LastIndex(lead, []byte{'\n'}), - } -} - -// Shape returns the shape of the given range in the input. Will -// panic if the range is not a subslice of the input. 
-func (p *Parser) Shape(r Range) Shape { - raw := p.Raw(r) - return Shape{ - Start: p.position(raw), - End: p.position(raw[r.Length:]), - } -} - -func (p *Parser) parseNewline(b []byte) ([]byte, error) { - if b[0] == '\n' { - return b[1:], nil - } - - if b[0] == '\r' { - _, rest, err := scanWindowsNewline(b) - return rest, err - } - - return nil, NewParserError(b[0:1], "expected newline but got %#U", b[0]) -} - -func (p *Parser) parseComment(b []byte) (reference, []byte, error) { - ref := invalidReference - data, rest, err := scanComment(b) - if p.KeepComments && err == nil { - ref = p.builder.Push(Node{ - Kind: Comment, - Raw: p.Range(data), - Data: data, - }) - } - return ref, rest, err -} - -func (p *Parser) parseExpression(b []byte) (reference, []byte, error) { - // expression = ws [ comment ] - // expression =/ ws keyval ws [ comment ] - // expression =/ ws table ws [ comment ] - ref := invalidReference - - b = p.parseWhitespace(b) - - if len(b) == 0 { - return ref, b, nil - } - - if b[0] == '#' { - ref, rest, err := p.parseComment(b) - return ref, rest, err - } - - if b[0] == '\n' || b[0] == '\r' { - return ref, b, nil - } - - var err error - if b[0] == '[' { - ref, b, err = p.parseTable(b) - } else { - ref, b, err = p.parseKeyval(b) - } - - if err != nil { - return ref, nil, err - } - - b = p.parseWhitespace(b) - - if len(b) > 0 && b[0] == '#' { - cref, rest, err := p.parseComment(b) - if cref != invalidReference { - p.builder.Chain(ref, cref) - } - return ref, rest, err - } - - return ref, b, nil -} - -func (p *Parser) parseTable(b []byte) (reference, []byte, error) { - // table = std-table / array-table - if len(b) > 1 && b[1] == '[' { - return p.parseArrayTable(b) - } - - return p.parseStdTable(b) -} - -func (p *Parser) parseArrayTable(b []byte) (reference, []byte, error) { - // array-table = array-table-open key array-table-close - // array-table-open = %x5B.5B ws ; [[ Double left square bracket - // array-table-close = ws %x5D.5D ; ]] Double right square bracket - ref := p.builder.Push(Node{ - Kind: ArrayTable, - }) - - b = b[2:] - b = p.parseWhitespace(b) - - k, b, err := p.parseKey(b) - if err != nil { - return ref, nil, err - } - - p.builder.AttachChild(ref, k) - b = p.parseWhitespace(b) - - b, err = expect(']', b) - if err != nil { - return ref, nil, err - } - - b, err = expect(']', b) - - return ref, b, err -} - -func (p *Parser) parseStdTable(b []byte) (reference, []byte, error) { - // std-table = std-table-open key std-table-close - // std-table-open = %x5B ws ; [ Left square bracket - // std-table-close = ws %x5D ; ] Right square bracket - ref := p.builder.Push(Node{ - Kind: Table, - }) - - b = b[1:] - b = p.parseWhitespace(b) - - key, b, err := p.parseKey(b) - if err != nil { - return ref, nil, err - } - - p.builder.AttachChild(ref, key) - - b = p.parseWhitespace(b) - - b, err = expect(']', b) - - return ref, b, err -} - -func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) { - // keyval = key keyval-sep val - ref := p.builder.Push(Node{ - Kind: KeyValue, - }) - - key, b, err := p.parseKey(b) - if err != nil { - return invalidReference, nil, err - } - - // keyval-sep = ws %x3D ws ; = - - b = p.parseWhitespace(b) - - if len(b) == 0 { - return invalidReference, nil, NewParserError(b, "expected = after a key, but the document ends there") - } - - b, err = expect('=', b) - if err != nil { - return invalidReference, nil, err - } - - b = p.parseWhitespace(b) - - valRef, b, err := p.parseVal(b) - if err != nil { - return ref, b, err - } - - 
p.builder.Chain(valRef, key) - p.builder.AttachChild(ref, valRef) - - return ref, b, err -} - -//nolint:cyclop,funlen -func (p *Parser) parseVal(b []byte) (reference, []byte, error) { - // val = string / boolean / array / inline-table / date-time / float / integer - ref := invalidReference - - if len(b) == 0 { - return ref, nil, NewParserError(b, "expected value, not eof") - } - - var err error - c := b[0] - - switch c { - case '"': - var raw []byte - var v []byte - if scanFollowsMultilineBasicStringDelimiter(b) { - raw, v, b, err = p.parseMultilineBasicString(b) - } else { - raw, v, b, err = p.parseBasicString(b) - } - - if err == nil { - ref = p.builder.Push(Node{ - Kind: String, - Raw: p.Range(raw), - Data: v, - }) - } - - return ref, b, err - case '\'': - var raw []byte - var v []byte - if scanFollowsMultilineLiteralStringDelimiter(b) { - raw, v, b, err = p.parseMultilineLiteralString(b) - } else { - raw, v, b, err = p.parseLiteralString(b) - } - - if err == nil { - ref = p.builder.Push(Node{ - Kind: String, - Raw: p.Range(raw), - Data: v, - }) - } - - return ref, b, err - case 't': - if !scanFollowsTrue(b) { - return ref, nil, NewParserError(atmost(b, 4), "expected 'true'") - } - - ref = p.builder.Push(Node{ - Kind: Bool, - Data: b[:4], - }) - - return ref, b[4:], nil - case 'f': - if !scanFollowsFalse(b) { - return ref, nil, NewParserError(atmost(b, 5), "expected 'false'") - } - - ref = p.builder.Push(Node{ - Kind: Bool, - Data: b[:5], - }) - - return ref, b[5:], nil - case '[': - return p.parseValArray(b) - case '{': - return p.parseInlineTable(b) - default: - return p.parseIntOrFloatOrDateTime(b) - } -} - -func atmost(b []byte, n int) []byte { - if n >= len(b) { - return b - } - - return b[:n] -} - -func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { - v, rest, err := scanLiteralString(b) - if err != nil { - return nil, nil, nil, err - } - - return v, v[1 : len(v)-1], rest, nil -} - -func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) { - // inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close - // inline-table-open = %x7B ws ; { - // inline-table-close = ws %x7D ; } - // inline-table-sep = ws %x2C ws ; , Comma - // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] - parent := p.builder.Push(Node{ - Kind: InlineTable, - Raw: p.Range(b[:1]), - }) - - first := true - - var child reference - - b = b[1:] - - var err error - - for len(b) > 0 { - previousB := b - b = p.parseWhitespace(b) - - if len(b) == 0 { - return parent, nil, NewParserError(previousB[:1], "inline table is incomplete") - } - - if b[0] == '}' { - break - } - - if !first { - b, err = expect(',', b) - if err != nil { - return parent, nil, err - } - b = p.parseWhitespace(b) - } - - var kv reference - - kv, b, err = p.parseKeyval(b) - if err != nil { - return parent, nil, err - } - - if first { - p.builder.AttachChild(parent, kv) - } else { - p.builder.Chain(child, kv) - } - child = kv - - first = false - } - - rest, err := expect('}', b) - - return parent, rest, err -} - -//nolint:funlen,cyclop -func (p *Parser) parseValArray(b []byte) (reference, []byte, error) { - // array = array-open [ array-values ] ws-comment-newline array-close - // array-open = %x5B ; [ - // array-close = %x5D ; ] - // array-values = ws-comment-newline val ws-comment-newline array-sep array-values - // array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] - // array-sep = %x2C ; , Comma - // ws-comment-newline = *( wschar / [ comment ] 
newline ) - arrayStart := b - b = b[1:] - - parent := p.builder.Push(Node{ - Kind: Array, - }) - - // First indicates whether the parser is looking for the first element - // (non-comment) of the array. - first := true - - lastChild := invalidReference - - addChild := func(valueRef reference) { - if lastChild == invalidReference { - p.builder.AttachChild(parent, valueRef) - } else { - p.builder.Chain(lastChild, valueRef) - } - lastChild = valueRef - } - - var err error - for len(b) > 0 { - cref := invalidReference - cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) - if err != nil { - return parent, nil, err - } - - if cref != invalidReference { - addChild(cref) - } - - if len(b) == 0 { - return parent, nil, NewParserError(arrayStart[:1], "array is incomplete") - } - - if b[0] == ']' { - break - } - - if b[0] == ',' { - if first { - return parent, nil, NewParserError(b[0:1], "array cannot start with comma") - } - b = b[1:] - - cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) - if err != nil { - return parent, nil, err - } - if cref != invalidReference { - addChild(cref) - } - } else if !first { - return parent, nil, NewParserError(b[0:1], "array elements must be separated by commas") - } - - // TOML allows trailing commas in arrays. - if len(b) > 0 && b[0] == ']' { - break - } - - var valueRef reference - valueRef, b, err = p.parseVal(b) - if err != nil { - return parent, nil, err - } - - addChild(valueRef) - - cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) - if err != nil { - return parent, nil, err - } - if cref != invalidReference { - addChild(cref) - } - - first = false - } - - rest, err := expect(']', b) - - return parent, rest, err -} - -func (p *Parser) parseOptionalWhitespaceCommentNewline(b []byte) (reference, []byte, error) { - rootCommentRef := invalidReference - latestCommentRef := invalidReference - - addComment := func(ref reference) { - if rootCommentRef == invalidReference { - rootCommentRef = ref - } else if latestCommentRef == invalidReference { - p.builder.AttachChild(rootCommentRef, ref) - latestCommentRef = ref - } else { - p.builder.Chain(latestCommentRef, ref) - latestCommentRef = ref - } - } - - for len(b) > 0 { - var err error - b = p.parseWhitespace(b) - - if len(b) > 0 && b[0] == '#' { - var ref reference - ref, b, err = p.parseComment(b) - if err != nil { - return invalidReference, nil, err - } - if ref != invalidReference { - addComment(ref) - } - } - - if len(b) == 0 { - break - } - - if b[0] == '\n' || b[0] == '\r' { - b, err = p.parseNewline(b) - if err != nil { - return invalidReference, nil, err - } - } else { - break - } - } - - return rootCommentRef, b, nil -} - -func (p *Parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { - token, rest, err := scanMultilineLiteralString(b) - if err != nil { - return nil, nil, nil, err - } - - i := 3 - - // skip the immediate new line - if token[i] == '\n' { - i++ - } else if token[i] == '\r' && token[i+1] == '\n' { - i += 2 - } - - return token, token[i : len(token)-3], rest, err -} - -//nolint:funlen,gocognit,cyclop -func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) { - // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body - // ml-basic-string-delim - // ml-basic-string-delim = 3quotation-mark - // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] - // - // mlb-content = mlb-char / newline / mlb-escaped-nl - // mlb-char = mlb-unescaped / escaped - // mlb-quotes = 1*2quotation-mark - // 
mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii - // mlb-escaped-nl = escape ws newline *( wschar / newline ) - token, escaped, rest, err := scanMultilineBasicString(b) - if err != nil { - return nil, nil, nil, err - } - - i := 3 - - // skip the immediate new line - if token[i] == '\n' { - i++ - } else if token[i] == '\r' && token[i+1] == '\n' { - i += 2 - } - - // fast path - startIdx := i - endIdx := len(token) - len(`"""`) - - if !escaped { - str := token[startIdx:endIdx] - verr := characters.Utf8TomlValidAlreadyEscaped(str) - if verr.Zero() { - return token, str, rest, nil - } - return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") - } - - var builder bytes.Buffer - - // The scanner ensures that the token starts and ends with quotes and that - // escapes are balanced. - for i < len(token)-3 { - c := token[i] - - //nolint:nestif - if c == '\\' { - // When the last non-whitespace character on a line is an unescaped \, - // it will be trimmed along with all whitespace (including newlines) up - // to the next non-whitespace character or closing delimiter. - - isLastNonWhitespaceOnLine := false - j := 1 - findEOLLoop: - for ; j < len(token)-3-i; j++ { - switch token[i+j] { - case ' ', '\t': - continue - case '\r': - if token[i+j+1] == '\n' { - continue - } - case '\n': - isLastNonWhitespaceOnLine = true - } - break findEOLLoop - } - if isLastNonWhitespaceOnLine { - i += j - for ; i < len(token)-3; i++ { - c := token[i] - if !(c == '\n' || c == '\r' || c == ' ' || c == '\t') { - i-- - break - } - } - i++ - continue - } - - // handle escaping - i++ - c = token[i] - - switch c { - case '"', '\\': - builder.WriteByte(c) - case 'b': - builder.WriteByte('\b') - case 'f': - builder.WriteByte('\f') - case 'n': - builder.WriteByte('\n') - case 'r': - builder.WriteByte('\r') - case 't': - builder.WriteByte('\t') - case 'e': - builder.WriteByte(0x1B) - case 'u': - x, err := hexToRune(atmost(token[i+1:], 4), 4) - if err != nil { - return nil, nil, nil, err - } - builder.WriteRune(x) - i += 4 - case 'U': - x, err := hexToRune(atmost(token[i+1:], 8), 8) - if err != nil { - return nil, nil, nil, err - } - - builder.WriteRune(x) - i += 8 - default: - return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) - } - i++ - } else { - size := characters.Utf8ValidNext(token[i:]) - if size == 0 { - return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) - } - builder.Write(token[i : i+size]) - i += size - } - } - - return token, builder.Bytes(), rest, nil -} - -func (p *Parser) parseKey(b []byte) (reference, []byte, error) { - // key = simple-key / dotted-key - // simple-key = quoted-key / unquoted-key - // - // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ - // quoted-key = basic-string / literal-string - // dotted-key = simple-key 1*( dot-sep simple-key ) - // - // dot-sep = ws %x2E ws ; . Period - raw, key, b, err := p.parseSimpleKey(b) - if err != nil { - return invalidReference, nil, err - } - - ref := p.builder.Push(Node{ - Kind: Key, - Raw: p.Range(raw), - Data: key, - }) - - for { - b = p.parseWhitespace(b) - if len(b) > 0 && b[0] == '.' 
{ - b = p.parseWhitespace(b[1:]) - - raw, key, b, err = p.parseSimpleKey(b) - if err != nil { - return ref, nil, err - } - - p.builder.PushAndChain(Node{ - Kind: Key, - Raw: p.Range(raw), - Data: key, - }) - } else { - break - } - } - - return ref, b, nil -} - -func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { - if len(b) == 0 { - return nil, nil, nil, NewParserError(b, "expected key but found none") - } - - // simple-key = quoted-key / unquoted-key - // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ - // quoted-key = basic-string / literal-string - switch { - case b[0] == '\'': - return p.parseLiteralString(b) - case b[0] == '"': - return p.parseBasicString(b) - case isUnquotedKeyChar(b[0]): - key, rest = scanUnquotedKey(b) - return key, key, rest, nil - default: - return nil, nil, nil, NewParserError(b[0:1], "invalid character at start of key: %c", b[0]) - } -} - -//nolint:funlen,cyclop -func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { - // basic-string = quotation-mark *basic-char quotation-mark - // quotation-mark = %x22 ; " - // basic-char = basic-unescaped / escaped - // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii - // escaped = escape escape-seq-char - // escape-seq-char = %x22 ; " quotation mark U+0022 - // escape-seq-char =/ %x5C ; \ reverse solidus U+005C - // escape-seq-char =/ %x62 ; b backspace U+0008 - // escape-seq-char =/ %x66 ; f form feed U+000C - // escape-seq-char =/ %x6E ; n line feed U+000A - // escape-seq-char =/ %x72 ; r carriage return U+000D - // escape-seq-char =/ %x74 ; t tab U+0009 - // escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX - // escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX - token, escaped, rest, err := scanBasicString(b) - if err != nil { - return nil, nil, nil, err - } - - startIdx := len(`"`) - endIdx := len(token) - len(`"`) - - // Fast path. If there is no escape sequence, the string should just be - // an UTF-8 encoded string, which is the same as Go. In that case, - // validate the string and return a direct reference to the buffer. - if !escaped { - str := token[startIdx:endIdx] - verr := characters.Utf8TomlValidAlreadyEscaped(str) - if verr.Zero() { - return token, str, rest, nil - } - return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") - } - - i := startIdx - - var builder bytes.Buffer - - // The scanner ensures that the token starts and ends with quotes and that - // escapes are balanced. 
- for i < len(token)-1 { - c := token[i] - if c == '\\' { - i++ - c = token[i] - - switch c { - case '"', '\\': - builder.WriteByte(c) - case 'b': - builder.WriteByte('\b') - case 'f': - builder.WriteByte('\f') - case 'n': - builder.WriteByte('\n') - case 'r': - builder.WriteByte('\r') - case 't': - builder.WriteByte('\t') - case 'e': - builder.WriteByte(0x1B) - case 'u': - x, err := hexToRune(token[i+1:len(token)-1], 4) - if err != nil { - return nil, nil, nil, err - } - - builder.WriteRune(x) - i += 4 - case 'U': - x, err := hexToRune(token[i+1:len(token)-1], 8) - if err != nil { - return nil, nil, nil, err - } - - builder.WriteRune(x) - i += 8 - default: - return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) - } - i++ - } else { - size := characters.Utf8ValidNext(token[i:]) - if size == 0 { - return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) - } - builder.Write(token[i : i+size]) - i += size - } - } - - return token, builder.Bytes(), rest, nil -} - -func hexToRune(b []byte, length int) (rune, error) { - if len(b) < length { - return -1, NewParserError(b, "unicode point needs %d characters, not %d", length, len(b)) - } - b = b[:length] - - var r uint32 - for i, c := range b { - d := uint32(0) - switch { - case '0' <= c && c <= '9': - d = uint32(c - '0') - case 'a' <= c && c <= 'f': - d = uint32(c - 'a' + 10) - case 'A' <= c && c <= 'F': - d = uint32(c - 'A' + 10) - default: - return -1, NewParserError(b[i:i+1], "non-hex character") - } - r = r*16 + d - } - - if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 { - return -1, NewParserError(b, "escape sequence is invalid Unicode code point") - } - - return rune(r), nil -} - -func (p *Parser) parseWhitespace(b []byte) []byte { - // ws = *wschar - // wschar = %x20 ; Space - // wschar =/ %x09 ; Horizontal tab - _, rest := scanWhitespace(b) - - return rest -} - -//nolint:cyclop -func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) { - switch b[0] { - case 'i': - if !scanFollowsInf(b) { - return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'inf'") - } - - return p.builder.Push(Node{ - Kind: Float, - Data: b[:3], - Raw: p.Range(b[:3]), - }), b[3:], nil - case 'n': - if !scanFollowsNan(b) { - return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'nan'") - } - - return p.builder.Push(Node{ - Kind: Float, - Data: b[:3], - Raw: p.Range(b[:3]), - }), b[3:], nil - case '+', '-': - return p.scanIntOrFloat(b) - } - - if len(b) < 3 { - return p.scanIntOrFloat(b) - } - - s := 5 - if len(b) < s { - s = len(b) - } - - for idx, c := range b[:s] { - if isDigit(c) { - continue - } - - if (idx == 2 && c == ':') || (idx == 4 && c == '-') { - return p.scanDateTime(b) - } - - break - } - - return p.scanIntOrFloat(b) -} - -func (p *Parser) scanDateTime(b []byte) (reference, []byte, error) { - // scans for contiguous characters in [0-9T:Z.+-], and up to one space if - // followed by a digit.
- hasDate := false - hasTime := false - hasTz := false - seenSpace := false - - i := 0 -byteLoop: - for ; i < len(b); i++ { - c := b[i] - - switch { - case isDigit(c): - case c == '-': - hasDate = true - const minOffsetOfTz = 8 - if i >= minOffsetOfTz { - hasTz = true - } - case c == 'T' || c == 't' || c == ':' || c == '.': - hasTime = true - case c == '+' || c == '-' || c == 'Z' || c == 'z': - hasTz = true - case c == ' ': - if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) { - i += 2 - // Avoid reaching past the end of the document in case the time - // is malformed. See TestIssue585. - if i >= len(b) { - i-- - } - seenSpace = true - hasTime = true - } else { - break byteLoop - } - default: - break byteLoop - } - } - - var kind Kind - - if hasTime { - if hasDate { - if hasTz { - kind = DateTime - } else { - kind = LocalDateTime - } - } else { - kind = LocalTime - } - } else { - kind = LocalDate - } - - return p.builder.Push(Node{ - Kind: kind, - Data: b[:i], - }), b[i:], nil -} - -//nolint:funlen,gocognit,cyclop -func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { - i := 0 - - if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' { - var isValidRune validRuneFn - - switch b[1] { - case 'x': - isValidRune = isValidHexRune - case 'o': - isValidRune = isValidOctalRune - case 'b': - isValidRune = isValidBinaryRune - default: - i++ - } - - if isValidRune != nil { - i += 2 - for ; i < len(b); i++ { - if !isValidRune(b[i]) { - break - } - } - } - - return p.builder.Push(Node{ - Kind: Integer, - Data: b[:i], - Raw: p.Range(b[:i]), - }), b[i:], nil - } - - isFloat := false - - for ; i < len(b); i++ { - c := b[i] - - if c >= '0' && c <= '9' || c == '+' || c == '-' || c == '_' { - continue - } - - if c == '.' || c == 'e' || c == 'E' { - isFloat = true - - continue - } - - if c == 'i' { - if scanFollowsInf(b[i:]) { - return p.builder.Push(Node{ - Kind: Float, - Data: b[:i+3], - Raw: p.Range(b[:i+3]), - }), b[i+3:], nil - } - - return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'i' while scanning for a number") - } - - if c == 'n' { - if scanFollowsNan(b[i:]) { - return p.builder.Push(Node{ - Kind: Float, - Data: b[:i+3], - Raw: p.Range(b[:i+3]), - }), b[i+3:], nil - } - - return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'n' while scanning for a number") - } - - break - } - - if i == 0 { - return invalidReference, b, NewParserError(b, "incomplete number") - } - - kind := Integer - - if isFloat { - kind = Float - } - - return p.builder.Push(Node{ - Kind: kind, - Data: b[:i], - Raw: p.Range(b[:i]), - }), b[i:], nil -} - -func isDigit(r byte) bool { - return r >= '0' && r <= '9' -} - -type validRuneFn func(r byte) bool - -func isValidHexRune(r byte) bool { - return r >= 'a' && r <= 'f' || - r >= 'A' && r <= 'F' || - r >= '0' && r <= '9' || - r == '_' -} - -func isValidOctalRune(r byte) bool { - return r >= '0' && r <= '7' || r == '_' -} - -func isValidBinaryRune(r byte) bool { - return r == '0' || r == '1' || r == '_' -} - -func expect(x byte, b []byte) ([]byte, error) { - if len(b) == 0 { - return nil, NewParserError(b, "expected character %c but the document ended here", x) - } - - if b[0] != x { - return nil, NewParserError(b[0:1], "expected character %c", x) - } - - return b[1:], nil -} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go b/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go deleted file mode 100644 index 0512181d28..0000000000 --- 
a/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go +++ /dev/null @@ -1,270 +0,0 @@ -package unstable - -import "github.com/pelletier/go-toml/v2/internal/characters" - -func scanFollows(b []byte, pattern string) bool { - n := len(pattern) - - return len(b) >= n && string(b[:n]) == pattern -} - -func scanFollowsMultilineBasicStringDelimiter(b []byte) bool { - return scanFollows(b, `"""`) -} - -func scanFollowsMultilineLiteralStringDelimiter(b []byte) bool { - return scanFollows(b, `'''`) -} - -func scanFollowsTrue(b []byte) bool { - return scanFollows(b, `true`) -} - -func scanFollowsFalse(b []byte) bool { - return scanFollows(b, `false`) -} - -func scanFollowsInf(b []byte) bool { - return scanFollows(b, `inf`) -} - -func scanFollowsNan(b []byte) bool { - return scanFollows(b, `nan`) -} - -func scanUnquotedKey(b []byte) ([]byte, []byte) { - // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ - for i := 0; i < len(b); i++ { - if !isUnquotedKeyChar(b[i]) { - return b[:i], b[i:] - } - } - - return b, b[len(b):] -} - -func isUnquotedKeyChar(r byte) bool { - return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' || r == '_' -} - -func scanLiteralString(b []byte) ([]byte, []byte, error) { - // literal-string = apostrophe *literal-char apostrophe - // apostrophe = %x27 ; ' apostrophe - // literal-char = %x09 / %x20-26 / %x28-7E / non-ascii - for i := 1; i < len(b); { - switch b[i] { - case '\'': - return b[:i+1], b[i+1:], nil - case '\n', '\r': - return nil, nil, NewParserError(b[i:i+1], "literal strings cannot have new lines") - } - size := characters.Utf8ValidNext(b[i:]) - if size == 0 { - return nil, nil, NewParserError(b[i:i+1], "invalid character") - } - i += size - } - - return nil, nil, NewParserError(b[len(b):], "unterminated literal string") -} - -func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) { - // ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body - // ml-literal-string-delim - // ml-literal-string-delim = 3apostrophe - // ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] - // - // mll-content = mll-char / newline - // mll-char = %x09 / %x20-26 / %x28-7E / non-ascii - // mll-quotes = 1*2apostrophe - for i := 3; i < len(b); { - switch b[i] { - case '\'': - if scanFollowsMultilineLiteralStringDelimiter(b[i:]) { - i += 3 - - // At that point we found 3 apostrophes, and i is the - // index of the byte after the third one. The scanner - // needs to be eager, because up to 2 extra - // apostrophes can be accepted at the end of the - // string.
- - if i >= len(b) || b[i] != '\'' { - return b[:i], b[i:], nil - } - i++ - - if i >= len(b) || b[i] != '\'' { - return b[:i], b[i:], nil - } - i++ - - if i < len(b) && b[i] == '\'' { - return nil, nil, NewParserError(b[i-3:i+1], "''' not allowed in multiline literal string") - } - - return b[:i], b[i:], nil - } - case '\r': - if len(b) < i+2 { - return nil, nil, NewParserError(b[len(b):], `need a \n after \r`) - } - if b[i+1] != '\n' { - return nil, nil, NewParserError(b[i:i+2], `need a \n after \r`) - } - i += 2 // skip the \n - continue - } - size := characters.Utf8ValidNext(b[i:]) - if size == 0 { - return nil, nil, NewParserError(b[i:i+1], "invalid character") - } - i += size - } - - return nil, nil, NewParserError(b[len(b):], `multiline literal string not terminated by '''`) -} - -func scanWindowsNewline(b []byte) ([]byte, []byte, error) { - const lenCRLF = 2 - if len(b) < lenCRLF { - return nil, nil, NewParserError(b, "windows new line expected") - } - - if b[1] != '\n' { - return nil, nil, NewParserError(b, `windows new line should be \r\n`) - } - - return b[:lenCRLF], b[lenCRLF:], nil -} - -func scanWhitespace(b []byte) ([]byte, []byte) { - for i := 0; i < len(b); i++ { - switch b[i] { - case ' ', '\t': - continue - default: - return b[:i], b[i:] - } - } - - return b, b[len(b):] -} - -func scanComment(b []byte) ([]byte, []byte, error) { - // comment-start-symbol = %x23 ; # - // non-ascii = %x80-D7FF / %xE000-10FFFF - // non-eol = %x09 / %x20-7F / non-ascii - // - // comment = comment-start-symbol *non-eol - - for i := 1; i < len(b); { - if b[i] == '\n' { - return b[:i], b[i:], nil - } - if b[i] == '\r' { - if i+1 < len(b) && b[i+1] == '\n' { - return b[:i+1], b[i+1:], nil - } - return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") - } - size := characters.Utf8ValidNext(b[i:]) - if size == 0 { - return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") - } - - i += size - } - - return b, b[len(b):], nil -} - -func scanBasicString(b []byte) ([]byte, bool, []byte, error) { - // basic-string = quotation-mark *basic-char quotation-mark - // quotation-mark = %x22 ; " - // basic-char = basic-unescaped / escaped - // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii - // escaped = escape escape-seq-char - escaped := false - i := 1 - - for ; i < len(b); i++ { - switch b[i] { - case '"': - return b[:i+1], escaped, b[i+1:], nil - case '\n', '\r': - return nil, escaped, nil, NewParserError(b[i:i+1], "basic strings cannot have new lines") - case '\\': - if len(b) < i+2 { - return nil, escaped, nil, NewParserError(b[i:i+1], "need a character after \\") - } - escaped = true - i++ // skip the next character - } - } - - return nil, escaped, nil, NewParserError(b[len(b):], `basic string not terminated by "`) -} - -func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) { - // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body - // ml-basic-string-delim - // ml-basic-string-delim = 3quotation-mark - // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] - // - // mlb-content = mlb-char / newline / mlb-escaped-nl - // mlb-char = mlb-unescaped / escaped - // mlb-quotes = 1*2quotation-mark - // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii - // mlb-escaped-nl = escape ws newline *( wschar / newline ) - - escaped := false - i := 3 - - for ; i < len(b); i++ { - switch b[i] { - case '"': - if scanFollowsMultilineBasicStringDelimiter(b[i:]) { - i += 3 - - // At that point we found 3 
quotation marks, and i is the - // index of the byte after the third one. The scanner - // needs to be eager, because up to 2 extra - // quotation marks can be accepted at the end of the - // string. - - if i >= len(b) || b[i] != '"' { - return b[:i], escaped, b[i:], nil - } - i++ - - if i >= len(b) || b[i] != '"' { - return b[:i], escaped, b[i:], nil - } - i++ - - if i < len(b) && b[i] == '"' { - return nil, escaped, nil, NewParserError(b[i-3:i+1], `""" not allowed in multiline basic string`) - } - - return b[:i], escaped, b[i:], nil - } - case '\\': - if len(b) < i+2 { - return nil, escaped, nil, NewParserError(b[len(b):], "need a character after \\") - } - escaped = true - i++ // skip the next character - case '\r': - if len(b) < i+2 { - return nil, escaped, nil, NewParserError(b[len(b):], `need a \n after \r`) - } - if b[i+1] != '\n' { - return nil, escaped, nil, NewParserError(b[i:i+2], `need a \n after \r`) - } - i++ // skip the \n - } - } - - return nil, escaped, nil, NewParserError(b[len(b):], `multiline basic string not terminated by """`) -} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b1b3..0000000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 9159de03e0..0000000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -script: - - make check diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e755..0000000000 --- a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile deleted file mode 100644 index ce9d7cded6..0000000000 --- a/vendor/github.com/pkg/errors/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -PKGS := github.com/pkg/errors -SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) -GO := go - -check: test vet gofmt misspell unconvert staticcheck ineffassign unparam - -test: - $(GO) test $(PKGS) - -vet: | test - $(GO) vet $(PKGS) - -staticcheck: - $(GO) get honnef.co/go/tools/cmd/staticcheck - staticcheck -checks all $(PKGS) - -misspell: - $(GO) get github.com/client9/misspell/cmd/misspell - misspell \ - -locale GB \ - -error \ - *.md *.go - -unconvert: - $(GO) get github.com/mdempsky/unconvert - unconvert -v $(PKGS) - -ineffassign: - $(GO) get github.com/gordonklaus/ineffassign - find $(SRCDIRS) -name '*.go' | xargs ineffassign - -pedantic: check errcheck - -unparam: - $(GO) get mvdan.cc/unparam - unparam ./... - -errcheck: - $(GO) get github.com/kisielk/errcheck - errcheck $(PKGS) - -gofmt: - @echo Checking code is gofmted - @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 54dfdcb12e..0000000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. 
For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Roadmap - -With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: - -- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) -- 1.0. Final release. - -## Contributing - -Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. - -Before sending a PR, please discuss your change by raising an issue. - -## License - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932eade02..0000000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 161aea2582..0000000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,288 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. 
errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d\n", f, f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -// Unwrap provides compatibility for Go 1.13 error chains. 
-func (w *withStack) Unwrap() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. -func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withMessage) Unwrap() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go deleted file mode 100644 index be0d10d0c7..0000000000 --- a/vendor/github.com/pkg/errors/go113.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build go1.13 - -package errors - -import ( - stderrors "errors" -) - -// Is reports whether any error in err's chain matches target. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. 
-func Is(err, target error) bool { return stderrors.Is(err, target) } - -// As finds the first error in err's chain that matches target, and if so, sets -// target to that error value and returns true. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error matches target if the error's concrete value is assignable to the value -// pointed to by target, or if the error has a method As(interface{}) bool such that -// As(target) returns true. In the latter case, the As method is responsible for -// setting target. -// -// As will panic if target is not a non-nil pointer to either a type that implements -// error, or to any interface type. As returns false if err is nil. -func As(err error, target interface{}) bool { return stderrors.As(err, target) } - -// Unwrap returns the result of calling the Unwrap method on err, if err's -// type contains an Unwrap method returning error. -// Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - return stderrors.Unwrap(err) -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 779a8348fb..0000000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,177 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strconv" - "strings" -) - -// Frame represents a program counter inside a stack frame. -// For historical reasons if Frame is interpreted as a uintptr -// its value represents the program counter + 1. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// name returns the name of this function, if known. -func (f Frame) name() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - return fn.Name() -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - io.WriteString(s, f.name()) - io.WriteString(s, "\n\t") - io.WriteString(s, f.file()) - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - io.WriteString(s, strconv.Itoa(f.line())) - case 'n': - io.WriteString(s, funcname(f.name())) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// MarshalText formats a stacktrace Frame as a text string. The output is the -// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. 
-func (f Frame) MarshalText() ([]byte, error) { - name := f.name() - if name == "unknown" { - return []byte(name), nil - } - return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. -func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - io.WriteString(s, "\n") - f.Format(s, verb) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - st.formatSlice(s, verb) - } - case 's': - st.formatSlice(s, verb) - } -} - -// formatSlice will format this StackTrace into the given buffer as a slice of -// Frame, only valid when called with '%s' or '%v'. -func (st StackTrace) formatSlice(s fmt.State, verb rune) { - io.WriteString(s, "[") - for i, f := range st { - if i > 0 { - io.WriteString(s, " ") - } - f.Format(s, verb) - } - io.WriteString(s, "]") -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore deleted file mode 100644 index 75623dcccb..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*.out -*.swp -*.8 -*.6 -_obj -_test* -markdown -tags diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml deleted file mode 100644 index b0b525a5a8..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -sudo: false -language: go -go: - - "1.10.x" - - "1.11.x" - - tip -matrix: - fast_finish: true - allow_failures: - - go: tip -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v ./... 
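For context: the hunks above delete the vendored github.com/pkg/errors tree, and its go113.go shim (removed above) simply delegates Is, As, and Unwrap to the standard errors package. A minimal sketch of the equivalent standard-library pattern for the wrap-and-inspect idiom shown in the deleted README; the readConfig helper and the path are hypothetical, for illustration only:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// readConfig is a hypothetical helper used only for illustration.
func readConfig(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		// fmt.Errorf with %w is the stdlib analogue of errors.Wrap.
		return fmt.Errorf("read config: %w", err)
	}
	return nil
}

func main() {
	err := readConfig("/no/such/file")
	// errors.Is and errors.As walk the Unwrap chain, which is exactly
	// what the deleted go113.go shims delegated to.
	if errors.Is(err, fs.ErrNotExist) {
		fmt.Println("missing config:", err)
	}
	var pathErr *fs.PathError
	if errors.As(err, &pathErr) {
		fmt.Println("failing path:", pathErr.Path)
	}
}
```

One behavioral difference worth noting: fmt.Errorf with %w does not record a stack trace, whereas the deleted errors.Wrap captured one via callers().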
diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt deleted file mode 100644 index 2885af3602..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt +++ /dev/null @@ -1,29 +0,0 @@ -Blackfriday is distributed under the Simplified BSD License: - -> Copyright © 2011 Russ Ross -> All rights reserved. -> -> Redistribution and use in source and binary forms, with or without -> modification, are permitted provided that the following conditions -> are met: -> -> 1. Redistributions of source code must retain the above copyright -> notice, this list of conditions and the following disclaimer. -> -> 2. Redistributions in binary form must reproduce the above -> copyright notice, this list of conditions and the following -> disclaimer in the documentation and/or other materials provided with -> the distribution. -> -> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md deleted file mode 100644 index d9c08a22fc..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/README.md +++ /dev/null @@ -1,335 +0,0 @@ -Blackfriday -[![Build Status][BuildV2SVG]][BuildV2URL] -[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] -=========== - -Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It -is paranoid about its input (so you can safely feed it user-supplied -data), it is fast, it supports common extensions (tables, smart -punctuation substitutions, etc.), and it is safe for all utf-8 -(unicode) input. - -HTML output is currently supported, along with Smartypants -extensions. - -It started as a translation from C of [Sundown][3]. - - -Installation ------------- - -Blackfriday is compatible with modern Go releases in module mode. -With Go installed: - - go get github.com/russross/blackfriday/v2 - -will resolve and add the package to the current development module, -then build and install it. Alternatively, you can achieve the same -if you import it in a package: - - import "github.com/russross/blackfriday/v2" - -and `go get` without parameters. - -Legacy GOPATH mode is unsupported. - - -Versions --------- - -Currently maintained and recommended version of Blackfriday is `v2`. It's being -developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the -documentation is available at -https://pkg.go.dev/github.com/russross/blackfriday/v2. - -It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. 
- -Version 2 offers a number of improvements over v1: - -* Cleaned up API -* A separate call to [`Parse`][4], which produces an abstract syntax tree for - the document -* Latest bug fixes -* Flexibility to easily add your own rendering extensions - -Potential drawbacks: - -* Our benchmarks show v2 to be slightly slower than v1. Currently in the - ballpark of around 15%. -* API breakage. If you can't afford modifying your code to adhere to the new API - and don't care too much about the new features, v2 is probably not for you. -* Several bug fixes are trailing behind and still need to be forward-ported to - v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for - tracking. - -If you are still interested in the legacy `v1`, you can import it from -`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found -here: https://pkg.go.dev/github.com/russross/blackfriday. - - -Usage ------ - -For the most sensible markdown processing, it is as simple as getting your input -into a byte slice and calling: - -```go -output := blackfriday.Run(input) -``` - -Your input will be parsed and the output rendered with a set of most popular -extensions enabled. If you want the most basic feature set, corresponding with -the bare Markdown specification, use: - -```go -output := blackfriday.Run(input, blackfriday.WithNoExtensions()) -``` - -### Sanitize untrusted content - -Blackfriday itself does nothing to protect against malicious content. If you are -dealing with user-supplied markdown, we recommend running Blackfriday's output -through HTML sanitizer such as [Bluemonday][5]. - -Here's an example of simple usage of Blackfriday together with Bluemonday: - -```go -import ( - "github.com/microcosm-cc/bluemonday" - "github.com/russross/blackfriday/v2" -) - -// ... -unsafe := blackfriday.Run(input) -html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) -``` - -### Custom options - -If you want to customize the set of options, use `blackfriday.WithExtensions`, -`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. - -### `blackfriday-tool` - -You can also check out `blackfriday-tool` for a more complete example -of how to use it. Download and install it using: - - go get github.com/russross/blackfriday-tool - -This is a simple command-line tool that allows you to process a -markdown file using a standalone program. You can also browse the -source directly on github if you are just looking for some example -code: - -* - -Note that if you have not already done so, installing -`blackfriday-tool` will be sufficient to download and install -blackfriday in addition to the tool itself. The tool binary will be -installed in `$GOPATH/bin`. This is a statically-linked binary that -can be copied to wherever you need it without worrying about -dependencies and library versions. - -### Sanitized anchor names - -Blackfriday includes an algorithm for creating sanitized anchor names -corresponding to a given input text. This algorithm is used to create -anchors for headings when `AutoHeadingIDs` extension is enabled. The -algorithm has a specification, so that other packages can create -compatible anchor names and links to those anchors. - -The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names. 
- -[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to -create compatible links to the anchor names generated by blackfriday. -This algorithm is also implemented in a small standalone package at -[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients -that want a small package and don't need full functionality of blackfriday. - - -Features --------- - -All features of Sundown are supported, including: - -* **Compatibility**. The Markdown v1.0.3 test suite passes with - the `--tidy` option. Without `--tidy`, the differences are - mostly in whitespace and entity escaping, where blackfriday is - more consistent and cleaner. - -* **Common extensions**, including table support, fenced code - blocks, autolinks, strikethroughs, non-strict emphasis, etc. - -* **Safety**. Blackfriday is paranoid when parsing, making it safe - to feed untrusted user input without fear of bad things - happening. The test suite stress tests this and there are no - known inputs that make it crash. If you find one, please let me - know and send me the input that does it. - - NOTE: "safety" in this context means *runtime safety only*. In order to - protect yourself against JavaScript injection in untrusted content, see - [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). - -* **Fast processing**. It is fast enough to render on-demand in - most web applications without having to cache the output. - -* **Thread safety**. You can run multiple parsers in different - goroutines without ill effect. There is no dependence on global - shared state. - -* **Minimal dependencies**. Blackfriday only depends on standard - library packages in Go. The source code is pretty - self-contained, so it is easy to add to any project, including - Google App Engine projects. - -* **Standards compliant**. Output successfully validates using the - W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. - - -Extensions ----------- - -In addition to the standard markdown syntax, this package -implements the following extensions: - -* **Intra-word emphasis supression**. The `_` character is - commonly used inside words when discussing code, so having - markdown interpret it as an emphasis command is usually the - wrong thing. Blackfriday lets you treat all emphasis markers as - normal characters when they occur inside a word. - -* **Tables**. Tables can be created by drawing them in the input - using a simple syntax: - - ``` - Name | Age - --------|------ - Bob | 27 - Alice | 23 - ``` - -* **Fenced code blocks**. In addition to the normal 4-space - indentation to mark code blocks, you can explicitly mark them - and supply a language (to make syntax highlighting simple). Just - mark it like this: - - ```go - func getTrue() bool { - return true - } - ``` - - You can use 3 or more backticks to mark the beginning of the - block, and the same number to mark the end of the block. - - To preserve classes of fenced code blocks while using the bluemonday - HTML sanitizer, use the following policy: - - ```go - p := bluemonday.UGCPolicy() - p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") - html := p.SanitizeBytes(unsafe) - ``` - -* **Definition lists**. A simple definition list is made of a single-line - term followed by a colon and the definition for that term. 
- - Cat - : Fluffy animal everyone likes - - Internet - : Vector of transmission for pictures of cats - - Terms must be separated from the previous definition by a blank line. - -* **Footnotes**. A marker in the text that will become a superscript number; - a footnote definition that will be placed in a list of footnotes at the - end of the document. A footnote looks like this: - - This is a footnote.[^1] - - [^1]: the footnote text. - -* **Autolinking**. Blackfriday can find URLs that have not been - explicitly marked as links and turn them into links. - -* **Strikethrough**. Use two tildes (`~~`) to mark text that - should be crossed out. - -* **Hard line breaks**. With this extension enabled newlines in the input - translate into line breaks in the output. This extension is off by default. - -* **Smart quotes**. Smartypants-style punctuation substitution is - supported, turning normal double- and single-quote marks into - curly quotes, etc. - -* **LaTeX-style dash parsing** is an additional option, where `--` - is translated into `–`, and `---` is translated into - `—`. This differs from most smartypants processors, which - turn a single hyphen into an ndash and a double hyphen into an - mdash. - -* **Smart fractions**, where anything that looks like a fraction - is translated into suitable HTML (instead of just a few special - cases like most smartypant processors). For example, `4/5` - becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as - <sup>4</sup>&frasl;<sub>5</sub>. - - -Other renderers --------------- - -Blackfriday is structured to allow alternative rendering engines. Here -are a few of note: - -* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown): - provides a GitHub Flavored Markdown renderer with fenced code block - highlighting, clickable heading anchor links. - - It's not customizable, and its goal is to produce HTML output - equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), - except the rendering is performed locally. - -* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, - but for markdown. - -* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex): - renders output as LaTeX. - -* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience - integration with the [Chroma](https://github.com/alecthomas/chroma) code - highlighting library. bfchroma is only compatible with v2 of Blackfriday and - provides a drop-in renderer ready to use with Blackfriday, as well as - options and means for further customization. - -* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. - -* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style - - -TODO ---- - -* More unit testing -* Improve Unicode support. It does not understand all Unicode - rules (about what constitutes a letter, a punctuation symbol, - etc.), so it may fail to detect word boundaries correctly in - some instances. It is safe on all UTF-8 input.
- - -License -------- - -[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) - - - [1]: https://daringfireball.net/projects/markdown/ "Markdown" - [2]: https://golang.org/ "Go Language" - [3]: https://github.com/vmg/sundown "Sundown" - [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func" - [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" - - [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2 - [BuildV2URL]: https://travis-ci.org/russross/blackfriday - [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2 - [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go deleted file mode 100644 index dcd61e6e35..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/block.go +++ /dev/null @@ -1,1612 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// Functions to parse block-level elements. -// - -package blackfriday - -import ( - "bytes" - "html" - "regexp" - "strings" - "unicode" -) - -const ( - charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" - escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" -) - -var ( - reBackslashOrAmp = regexp.MustCompile("[\\&]") - reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) -) - -// Parse block-level data. -// Note: this function and many that it calls assume that -// the input buffer ends with a newline. -func (p *Markdown) block(data []byte) { - // this is called recursively: enforce a maximum depth - if p.nesting >= p.maxNesting { - return - } - p.nesting++ - - // parse out one block-level construct at a time - for len(data) > 0 { - // prefixed heading: - // - // # Heading 1 - // ## Heading 2 - // ... - // ###### Heading 6 - if p.isPrefixHeading(data) { - data = data[p.prefixHeading(data):] - continue - } - - // block of preformatted HTML: - // - //
<div> - //     ... - // </div>
- if data[0] == '<' { - if i := p.html(data, true); i > 0 { - data = data[i:] - continue - } - } - - // title block - // - // % stuff - // % more stuff - // % even more stuff - if p.extensions&Titleblock != 0 { - if data[0] == '%' { - if i := p.titleBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - } - - // blank lines. note: returns the # of bytes to skip - if i := p.isEmpty(data); i > 0 { - data = data[i:] - continue - } - - // indented code block: - // - // func max(a, b int) int { - // if a > b { - // return a - // } - // return b - // } - if p.codePrefix(data) > 0 { - data = data[p.code(data):] - continue - } - - // fenced code block: - // - // ``` go - // func fact(n int) int { - // if n <= 1 { - // return n - // } - // return n * fact(n-1) - // } - // ``` - if p.extensions&FencedCode != 0 { - if i := p.fencedCodeBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - - // horizontal rule: - // - // ------ - // or - // ****** - // or - // ______ - if p.isHRule(data) { - p.addBlock(HorizontalRule, nil) - var i int - for i = 0; i < len(data) && data[i] != '\n'; i++ { - } - data = data[i:] - continue - } - - // block quote: - // - // > A big quote I found somewhere - // > on the web - if p.quotePrefix(data) > 0 { - data = data[p.quote(data):] - continue - } - - // table: - // - // Name | Age | Phone - // ------|-----|--------- - // Bob | 31 | 555-1234 - // Alice | 27 | 555-4321 - if p.extensions&Tables != 0 { - if i := p.table(data); i > 0 { - data = data[i:] - continue - } - } - - // an itemized/unordered list: - // - // * Item 1 - // * Item 2 - // - // also works with + or - - if p.uliPrefix(data) > 0 { - data = data[p.list(data, 0):] - continue - } - - // a numbered/ordered list: - // - // 1. Item 1 - // 2. Item 2 - if p.oliPrefix(data) > 0 { - data = data[p.list(data, ListTypeOrdered):] - continue - } - - // definition lists: - // - // Term 1 - // : Definition a - // : Definition b - // - // Term 2 - // : Definition c - if p.extensions&DefinitionLists != 0 { - if p.dliPrefix(data) > 0 { - data = data[p.list(data, ListTypeDefinition):] - continue - } - } - - // anything else must look like a normal paragraph - // note: this finds underlined headings, too - data = data[p.paragraph(data):] - } - - p.nesting-- -} - -func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { - p.closeUnmatchedBlocks() - container := p.addChild(typ, 0) - container.content = content - return container -} - -func (p *Markdown) isPrefixHeading(data []byte) bool { - if data[0] != '#' { - return false - } - - if p.extensions&SpaceHeadings != 0 { - level := 0 - for level < 6 && level < len(data) && data[level] == '#' { - level++ - } - if level == len(data) || data[level] != ' ' { - return false - } - } - return true -} - -func (p *Markdown) prefixHeading(data []byte) int { - level := 0 - for level < 6 && level < len(data) && data[level] == '#' { - level++ - } - i := skipChar(data, level, ' ') - end := skipUntilChar(data, i, '\n') - skip := end - id := "" - if p.extensions&HeadingIDs != 0 { - j, k := 0, 0 - // find start/end of heading id - for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { - } - for k = j + 1; k < end && data[k] != '}'; k++ { - } - // extract heading id iff found - if j < end && k < end { - id = string(data[j+2 : k]) - end = j - skip = k + 1 - for end > 0 && data[end-1] == ' ' { - end-- - } - } - } - for end > 0 && data[end-1] == '#' { - if isBackslashEscaped(data, end-1) { - break - } - end-- - } - for end > 0 && data[end-1] == ' ' { - end-- - 
} - if end > i { - if id == "" && p.extensions&AutoHeadingIDs != 0 { - id = SanitizedAnchorName(string(data[i:end])) - } - block := p.addBlock(Heading, data[i:end]) - block.HeadingID = id - block.Level = level - } - return skip -} - -func (p *Markdown) isUnderlinedHeading(data []byte) int { - // test of level 1 heading - if data[0] == '=' { - i := skipChar(data, 1, '=') - i = skipChar(data, i, ' ') - if i < len(data) && data[i] == '\n' { - return 1 - } - return 0 - } - - // test of level 2 heading - if data[0] == '-' { - i := skipChar(data, 1, '-') - i = skipChar(data, i, ' ') - if i < len(data) && data[i] == '\n' { - return 2 - } - return 0 - } - - return 0 -} - -func (p *Markdown) titleBlock(data []byte, doRender bool) int { - if data[0] != '%' { - return 0 - } - splitData := bytes.Split(data, []byte("\n")) - var i int - for idx, b := range splitData { - if !bytes.HasPrefix(b, []byte("%")) { - i = idx // - 1 - break - } - } - - data = bytes.Join(splitData[0:i], []byte("\n")) - consumed := len(data) - data = bytes.TrimPrefix(data, []byte("% ")) - data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) - block := p.addBlock(Heading, data) - block.Level = 1 - block.IsTitleblock = true - - return consumed -} - -func (p *Markdown) html(data []byte, doRender bool) int { - var i, j int - - // identify the opening tag - if data[0] != '<' { - return 0 - } - curtag, tagfound := p.htmlFindTag(data[1:]) - - // handle special cases - if !tagfound { - // check for an HTML comment - if size := p.htmlComment(data, doRender); size > 0 { - return size - } - - // check for an
<hr> tag - if size := p.htmlHr(data, doRender); size > 0 { - return size - } - - // no special case recognized - return 0 - } - - // look for an unindented matching closing tag - // followed by a blank line - found := false - /* - closetag := []byte("\n</" + curtag + ">\n") - j = len(curtag) + 1 - for !found { - // scan for a closing tag at the beginning of a line - if skip := bytes.Index(data[j:], closetag); skip >= 0 { - j += skip + len(closetag) - } else { - break - } - - // see if it is the only thing on the line - if skip := p.isEmpty(data[j:]); skip > 0 { - // see if it is followed by a blank line/eof - j += skip - if j >= len(data) { - found = true - i = j - } else { - if skip := p.isEmpty(data[j:]); skip > 0 { - j += skip - found = true - i = j - } - } - } - } - */ - - // if not found, try a second pass looking for indented match - // but not if tag is "ins" or "del" (following original Markdown.pl) - if !found && curtag != "ins" && curtag != "del" { - i = 1 - for i < len(data) { - i++ - for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { - i++ - } - - if i+2+len(curtag) >= len(data) { - break - } - - j = p.htmlFindEnd(curtag, data[i-1:]) - - if j > 0 { - i += j - 1 - found = true - break - } - } - } - - if !found { - return 0 - } - - // the end of the block has been found - if doRender { - // trim newlines - end := i - for end > 0 && data[end-1] == '\n' { - end-- - } - finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) - } - - return i -} - -func finalizeHTMLBlock(block *Node) { - block.Literal = block.content - block.content = nil -} - -// HTML comment, lax form -func (p *Markdown) htmlComment(data []byte, doRender bool) int { - i := p.inlineHTMLComment(data) - // needs to end with a blank line - if j := p.isEmpty(data[i:]); j > 0 { - size := i + j - if doRender { - // trim trailing newlines - end := size - for end > 0 && data[end-1] == '\n' { - end-- - } - block := p.addBlock(HTMLBlock, data[:end]) - finalizeHTMLBlock(block) - } - return size - } - return 0 -} - -// HR, which is the only self-closing block tag considered -func (p *Markdown) htmlHr(data []byte, doRender bool) int { - if len(data) < 4 { - return 0 - } - if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { - return 0 - } - if data[3] != ' ' && data[3] != '/' && data[3] != '>' { - // not an <hr>
tag after all; at least not a valid one - return 0 - } - i := 3 - for i < len(data) && data[i] != '>' && data[i] != '\n' { - i++ - } - if i < len(data) && data[i] == '>' { - i++ - if j := p.isEmpty(data[i:]); j > 0 { - size := i + j - if doRender { - // trim newlines - end := size - for end > 0 && data[end-1] == '\n' { - end-- - } - finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) - } - return size - } - } - return 0 -} - -func (p *Markdown) htmlFindTag(data []byte) (string, bool) { - i := 0 - for i < len(data) && isalnum(data[i]) { - i++ - } - key := string(data[:i]) - if _, ok := blockTags[key]; ok { - return key, true - } - return "", false -} - -func (p *Markdown) htmlFindEnd(tag string, data []byte) int { - // assume data[0] == '<' && data[1] == '/' already tested - if tag == "hr" { - return 2 - } - // check if tag is a match - closetag := []byte("</" + tag + ">") - if !bytes.HasPrefix(data, closetag) { - return 0 - } - i := len(closetag) - - // check that the rest of the line is blank - skip := 0 - if skip = p.isEmpty(data[i:]); skip == 0 { - return 0 - } - i += skip - skip = 0 - - if i >= len(data) { - return i - } - - if p.extensions&LaxHTMLBlocks != 0 { - return i - } - if skip = p.isEmpty(data[i:]); skip == 0 { - // following line must be blank - return 0 - } - - return i + skip -} - -func (*Markdown) isEmpty(data []byte) int { - // it is okay to call isEmpty on an empty buffer - if len(data) == 0 { - return 0 - } - - var i int - for i = 0; i < len(data) && data[i] != '\n'; i++ { - if data[i] != ' ' && data[i] != '\t' { - return 0 - } - } - if i < len(data) && data[i] == '\n' { - i++ - } - return i -} - -func (*Markdown) isHRule(data []byte) bool { - i := 0 - - // skip up to three spaces - for i < 3 && data[i] == ' ' { - i++ - } - - // look at the hrule char - if data[i] != '*' && data[i] != '-' && data[i] != '_' { - return false - } - c := data[i] - - // the whole line must be the char or whitespace - n := 0 - for i < len(data) && data[i] != '\n' { - switch { - case data[i] == c: - n++ - case data[i] != ' ': - return false - } - i++ - } - - return n >= 3 -} - -// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, -// and returns the end index if so, or 0 otherwise. It also returns the marker found. -// If info is not nil, it gets set to the syntax specified in the fence line. -func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) { - i, size := 0, 0 - - // skip up to three spaces - for i < len(data) && i < 3 && data[i] == ' ' { - i++ - } - - // check for the marker characters: ~ or ` - if i >= len(data) { - return 0, "" - } - if data[i] != '~' && data[i] != '`' { - return 0, "" - } - - c := data[i] - - // the whole line must be the same char or whitespace - for i < len(data) && data[i] == c { - size++ - i++ - } - - // the marker char must occur at least 3 times - if size < 3 { - return 0, "" - } - marker = string(data[i-size : i]) - - // if this is the end marker, it must match the beginning marker - if oldmarker != "" && marker != oldmarker { - return 0, "" - } - - // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here - // into one, always get the info string, and discard it if the caller doesn't care.
- if info != nil { - infoLength := 0 - i = skipChar(data, i, ' ') - - if i >= len(data) { - if i == len(data) { - return i, marker - } - return 0, "" - } - - infoStart := i - - if data[i] == '{' { - i++ - infoStart++ - - for i < len(data) && data[i] != '}' && data[i] != '\n' { - infoLength++ - i++ - } - - if i >= len(data) || data[i] != '}' { - return 0, "" - } - - // strip all whitespace at the beginning and the end - // of the {} block - for infoLength > 0 && isspace(data[infoStart]) { - infoStart++ - infoLength-- - } - - for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { - infoLength-- - } - i++ - i = skipChar(data, i, ' ') - } else { - for i < len(data) && !isverticalspace(data[i]) { - infoLength++ - i++ - } - } - - *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) - } - - if i == len(data) { - return i, marker - } - if i > len(data) || data[i] != '\n' { - return 0, "" - } - return i + 1, marker // Take newline into account. -} - -// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, -// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. -// If doRender is true, a final newline is mandatory to recognize the fenced code block. -func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { - var info string - beg, marker := isFenceLine(data, &info, "") - if beg == 0 || beg >= len(data) { - return 0 - } - fenceLength := beg - 1 - - var work bytes.Buffer - work.Write([]byte(info)) - work.WriteByte('\n') - - for { - // safe to assume beg < len(data) - - // check for the end of the code block - fenceEnd, _ := isFenceLine(data[beg:], nil, marker) - if fenceEnd != 0 { - beg += fenceEnd - break - } - - // copy the current line - end := skipUntilChar(data, beg, '\n') + 1 - - // did we reach the end of the buffer without a closing marker? 
- if end >= len(data) { - return 0 - } - - // verbatim copy to the working buffer - if doRender { - work.Write(data[beg:end]) - } - beg = end - } - - if doRender { - block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer - block.IsFenced = true - block.FenceLength = fenceLength - finalizeCodeBlock(block) - } - - return beg -} - -func unescapeChar(str []byte) []byte { - if str[0] == '\\' { - return []byte{str[1]} - } - return []byte(html.UnescapeString(string(str))) -} - -func unescapeString(str []byte) []byte { - if reBackslashOrAmp.Match(str) { - return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) - } - return str -} - -func finalizeCodeBlock(block *Node) { - if block.IsFenced { - newlinePos := bytes.IndexByte(block.content, '\n') - firstLine := block.content[:newlinePos] - rest := block.content[newlinePos+1:] - block.Info = unescapeString(bytes.Trim(firstLine, "\n")) - block.Literal = rest - } else { - block.Literal = block.content - } - block.content = nil -} - -func (p *Markdown) table(data []byte) int { - table := p.addBlock(Table, nil) - i, columns := p.tableHeader(data) - if i == 0 { - p.tip = table.Parent - table.Unlink() - return 0 - } - - p.addBlock(TableBody, nil) - - for i < len(data) { - pipes, rowStart := 0, i - for ; i < len(data) && data[i] != '\n'; i++ { - if data[i] == '|' { - pipes++ - } - } - - if pipes == 0 { - i = rowStart - break - } - - // include the newline in data sent to tableRow - if i < len(data) && data[i] == '\n' { - i++ - } - p.tableRow(data[rowStart:i], columns, false) - } - - return i -} - -// check if the specified position is preceded by an odd number of backslashes -func isBackslashEscaped(data []byte, i int) bool { - backslashes := 0 - for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { - backslashes++ - } - return backslashes&1 == 1 -} - -func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { - i := 0 - colCount := 1 - for i = 0; i < len(data) && data[i] != '\n'; i++ { - if data[i] == '|' && !isBackslashEscaped(data, i) { - colCount++ - } - } - - // doesn't look like a table header - if colCount == 1 { - return - } - - // include the newline in the data sent to tableRow - j := i - if j < len(data) && data[j] == '\n' { - j++ - } - header := data[:j] - - // column count ignores pipes at beginning or end of line - if data[0] == '|' { - colCount-- - } - if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { - colCount-- - } - - columns = make([]CellAlignFlags, colCount) - - // move on to the header underline - i++ - if i >= len(data) { - return - } - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - i = skipChar(data, i, ' ') - - // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 - // and trailing | optional on last column - col := 0 - for i < len(data) && data[i] != '\n' { - dashes := 0 - - if data[i] == ':' { - i++ - columns[col] |= TableAlignmentLeft - dashes++ - } - for i < len(data) && data[i] == '-' { - i++ - dashes++ - } - if i < len(data) && data[i] == ':' { - i++ - columns[col] |= TableAlignmentRight - dashes++ - } - for i < len(data) && data[i] == ' ' { - i++ - } - if i == len(data) { - return - } - // end of column test is messy - switch { - case dashes < 3: - // not a valid column - return - - case data[i] == '|' && !isBackslashEscaped(data, i): - // marker found, now skip past trailing whitespace - col++ - i++ - for i < len(data) && data[i] == ' ' { - i++ - } - - // trailing junk found after last column - if col >= colCount && i < len(data) && data[i] != '\n' { - return - } - - case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: - // something else found where marker was required - return - - case data[i] == '\n': - // marker is optional for the last column - col++ - - default: - // trailing junk found after last column - return - } - } - if col != colCount { - return - } - - p.addBlock(TableHead, nil) - p.tableRow(header, columns, true) - size = i - if size < len(data) && data[size] == '\n' { - size++ - } - return -} - -func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { - p.addBlock(TableRow, nil) - i, col := 0, 0 - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - - for col = 0; col < len(columns) && i < len(data); col++ { - for i < len(data) && data[i] == ' ' { - i++ - } - - cellStart := i - - for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { - i++ - } - - cellEnd := i - - // skip the end-of-cell marker, possibly taking us past end of buffer - i++ - - for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { - cellEnd-- - } - - cell := p.addBlock(TableCell, data[cellStart:cellEnd]) - cell.IsHeader = header - cell.Align = columns[col] - } - - // pad it out with empty columns to get the right number - for ; col < len(columns); col++ { - cell := p.addBlock(TableCell, nil) - cell.IsHeader = header - cell.Align = columns[col] - } - - // silently ignore rows with too many cells -} - -// returns blockquote prefix length -func (p *Markdown) quotePrefix(data []byte) int { - i := 0 - for i < 3 && i < len(data) && data[i] == ' ' { - i++ - } - if i < len(data) && data[i] == '>' { - if i+1 < len(data) && data[i+1] == ' ' { - return i + 2 - } - return i + 1 - } - return 0 -} - -// blockquote ends with at least one blank line -// followed by something without a blockquote prefix -func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { - if p.isEmpty(data[beg:]) <= 0 { - return false - } - if end >= len(data) { - return true - } - return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 -} - -// parse a blockquote fragment -func (p *Markdown) quote(data []byte) int { - block := p.addBlock(BlockQuote, nil) - var raw bytes.Buffer - beg, end := 0, 0 - for beg < len(data) { - end = beg - // Step over whole lines, collecting them. 
While doing that, check for - // fenced code and if one's found, incorporate it altogether, - // irregardless of any contents inside it - for end < len(data) && data[end] != '\n' { - if p.extensions&FencedCode != 0 { - if i := p.fencedCodeBlock(data[end:], false); i > 0 { - // -1 to compensate for the extra end++ after the loop: - end += i - 1 - break - } - } - end++ - } - if end < len(data) && data[end] == '\n' { - end++ - } - if pre := p.quotePrefix(data[beg:]); pre > 0 { - // skip the prefix - beg += pre - } else if p.terminateBlockquote(data, beg, end) { - break - } - // this line is part of the blockquote - raw.Write(data[beg:end]) - beg = end - } - p.block(raw.Bytes()) - p.finalize(block) - return end -} - -// returns prefix length for block code -func (p *Markdown) codePrefix(data []byte) int { - if len(data) >= 1 && data[0] == '\t' { - return 1 - } - if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { - return 4 - } - return 0 -} - -func (p *Markdown) code(data []byte) int { - var work bytes.Buffer - - i := 0 - for i < len(data) { - beg := i - for i < len(data) && data[i] != '\n' { - i++ - } - if i < len(data) && data[i] == '\n' { - i++ - } - - blankline := p.isEmpty(data[beg:i]) > 0 - if pre := p.codePrefix(data[beg:i]); pre > 0 { - beg += pre - } else if !blankline { - // non-empty, non-prefixed line breaks the pre - i = beg - break - } - - // verbatim copy to the working buffer - if blankline { - work.WriteByte('\n') - } else { - work.Write(data[beg:i]) - } - } - - // trim all the \n off the end of work - workbytes := work.Bytes() - eol := len(workbytes) - for eol > 0 && workbytes[eol-1] == '\n' { - eol-- - } - if eol != len(workbytes) { - work.Truncate(eol) - } - - work.WriteByte('\n') - - block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer - block.IsFenced = false - finalizeCodeBlock(block) - - return i -} - -// returns unordered list item prefix -func (p *Markdown) uliPrefix(data []byte) int { - i := 0 - // start with up to 3 spaces - for i < len(data) && i < 3 && data[i] == ' ' { - i++ - } - if i >= len(data)-1 { - return 0 - } - // need one of {'*', '+', '-'} followed by a space or a tab - if (data[i] != '*' && data[i] != '+' && data[i] != '-') || - (data[i+1] != ' ' && data[i+1] != '\t') { - return 0 - } - return i + 2 -} - -// returns ordered list item prefix -func (p *Markdown) oliPrefix(data []byte) int { - i := 0 - - // start with up to 3 spaces - for i < 3 && i < len(data) && data[i] == ' ' { - i++ - } - - // count the digits - start := i - for i < len(data) && data[i] >= '0' && data[i] <= '9' { - i++ - } - if start == i || i >= len(data)-1 { - return 0 - } - - // we need >= 1 digits followed by a dot and a space or a tab - if data[i] != '.' 
|| !(data[i+1] == ' ' || data[i+1] == '\t') { - return 0 - } - return i + 2 -} - -// returns definition list item prefix -func (p *Markdown) dliPrefix(data []byte) int { - if len(data) < 2 { - return 0 - } - i := 0 - // need a ':' followed by a space or a tab - if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { - return 0 - } - for i < len(data) && data[i] == ' ' { - i++ - } - return i + 2 -} - -// parse ordered or unordered list block -func (p *Markdown) list(data []byte, flags ListType) int { - i := 0 - flags |= ListItemBeginningOfList - block := p.addBlock(List, nil) - block.ListFlags = flags - block.Tight = true - - for i < len(data) { - skip := p.listItem(data[i:], &flags) - if flags&ListItemContainsBlock != 0 { - block.ListData.Tight = false - } - i += skip - if skip == 0 || flags&ListItemEndOfList != 0 { - break - } - flags &= ^ListItemBeginningOfList - } - - above := block.Parent - finalizeList(block) - p.tip = above - return i -} - -// Returns true if the list item is not the same type as its parent list -func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool { - if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 { - return true - } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 { - return true - } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) { - return true - } - return false -} - -// Returns true if block ends with a blank line, descending if needed -// into lists and sublists. -func endsWithBlankLine(block *Node) bool { - // TODO: figure this out. Always false now. - for block != nil { - //if block.lastLineBlank { - //return true - //} - t := block.Type - if t == List || t == Item { - block = block.LastChild - } else { - break - } - } - return false -} - -func finalizeList(block *Node) { - block.open = false - item := block.FirstChild - for item != nil { - // check for non-final list item ending with blank line: - if endsWithBlankLine(item) && item.Next != nil { - block.ListData.Tight = false - break - } - // recurse into children of list item, to see if there are spaces - // between any of them: - subItem := item.FirstChild - for subItem != nil { - if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { - block.ListData.Tight = false - break - } - subItem = subItem.Next - } - item = item.Next - } -} - -// Parse a single list item. -// Assumes initial prefix is already removed if this is a sublist. 
-func (p *Markdown) listItem(data []byte, flags *ListType) int { - // keep track of the indentation of the first line - itemIndent := 0 - if data[0] == '\t' { - itemIndent += 4 - } else { - for itemIndent < 3 && data[itemIndent] == ' ' { - itemIndent++ - } - } - - var bulletChar byte = '*' - i := p.uliPrefix(data) - if i == 0 { - i = p.oliPrefix(data) - } else { - bulletChar = data[i-2] - } - if i == 0 { - i = p.dliPrefix(data) - // reset definition term flag - if i > 0 { - *flags &= ^ListTypeTerm - } - } - if i == 0 { - // if in definition list, set term flag and continue - if *flags&ListTypeDefinition != 0 { - *flags |= ListTypeTerm - } else { - return 0 - } - } - - // skip leading whitespace on first line - for i < len(data) && data[i] == ' ' { - i++ - } - - // find the end of the line - line := i - for i > 0 && i < len(data) && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[line:i]) - line = i - - // process the following lines - containsBlankLine := false - sublist := 0 - codeBlockMarker := "" - -gatherlines: - for line < len(data) { - i++ - - // find the end of this line - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[line:i]) > 0 { - containsBlankLine = true - line = i - continue - } - - // calculate the indentation - indent := 0 - indentIndex := 0 - if data[line] == '\t' { - indentIndex++ - indent += 4 - } else { - for indent < 4 && line+indent < i && data[line+indent] == ' ' { - indent++ - indentIndex++ - } - } - - chunk := data[line+indentIndex : i] - - if p.extensions&FencedCode != 0 { - // determine if in or out of codeblock - // if in codeblock, ignore normal list processing - _, marker := isFenceLine(chunk, nil, codeBlockMarker) - if marker != "" { - if codeBlockMarker == "" { - // start of codeblock - codeBlockMarker = marker - } else { - // end of codeblock. - codeBlockMarker = "" - } - } - // we are in a codeblock, write line, and continue - if codeBlockMarker != "" || marker != "" { - raw.Write(data[line+indentIndex : i]) - line = i - continue gatherlines - } - } - - // evaluate how this line fits in - switch { - // is this a nested list item? - case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || - p.oliPrefix(chunk) > 0 || - p.dliPrefix(chunk) > 0: - - // to be a nested list, it must be indented more - // if not, it is either a different kind of list - // or the next item in the same list - if indent <= itemIndent { - if p.listTypeChanged(chunk, flags) { - *flags |= ListItemEndOfList - } else if containsBlankLine { - *flags |= ListItemContainsBlock - } - - break gatherlines - } - - if containsBlankLine { - *flags |= ListItemContainsBlock - } - - // is this the first item in the nested list? - if sublist == 0 { - sublist = raw.Len() - } - - // is this a nested prefix heading? - case p.isPrefixHeading(chunk): - // if the heading is not indented, it is not nested in the list - // and thus ends the list - if containsBlankLine && indent < 4 { - *flags |= ListItemEndOfList - break gatherlines - } - *flags |= ListItemContainsBlock - - // anything following an empty line is only part - // of this item if it is indented 4 spaces - // (regardless of the indentation of the beginning of the item) - case containsBlankLine && indent < 4: - if *flags&ListTypeDefinition != 0 && i < len(data)-1 { - // is the next item still a part of this list? 
- next := i - for next < len(data) && data[next] != '\n' { - next++ - } - for next < len(data)-1 && data[next] == '\n' { - next++ - } - if i < len(data)-1 && data[i] != ':' && data[next] != ':' { - *flags |= ListItemEndOfList - } - } else { - *flags |= ListItemEndOfList - } - break gatherlines - - // a blank line means this should be parsed as a block - case containsBlankLine: - raw.WriteByte('\n') - *flags |= ListItemContainsBlock - } - - // if this line was preceded by one or more blanks, - // re-introduce the blank into the buffer - if containsBlankLine { - containsBlankLine = false - raw.WriteByte('\n') - } - - // add the line into the working buffer without prefix - raw.Write(data[line+indentIndex : i]) - - line = i - } - - rawBytes := raw.Bytes() - - block := p.addBlock(Item, nil) - block.ListFlags = *flags - block.Tight = false - block.BulletChar = bulletChar - block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark - - // render the contents of the list item - if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { - // intermediate render of block item, except for definition term - if sublist > 0 { - p.block(rawBytes[:sublist]) - p.block(rawBytes[sublist:]) - } else { - p.block(rawBytes) - } - } else { - // intermediate render of inline item - if sublist > 0 { - child := p.addChild(Paragraph, 0) - child.content = rawBytes[:sublist] - p.block(rawBytes[sublist:]) - } else { - child := p.addChild(Paragraph, 0) - child.content = rawBytes - } - } - return line -} - -// render a single paragraph that has already been parsed out -func (p *Markdown) renderParagraph(data []byte) { - if len(data) == 0 { - return - } - - // trim leading spaces - beg := 0 - for data[beg] == ' ' { - beg++ - } - - end := len(data) - // trim trailing newline - if data[len(data)-1] == '\n' { - end-- - } - - // trim trailing spaces - for end > beg && data[end-1] == ' ' { - end-- - } - - p.addBlock(Paragraph, data[beg:end]) -} - -func (p *Markdown) paragraph(data []byte) int { - // prev: index of 1st char of previous line - // line: index of 1st char of current line - // i: index of cursor/end of current line - var prev, line, i int - tabSize := TabSizeDefault - if p.extensions&TabSizeEight != 0 { - tabSize = TabSizeDouble - } - // keep going until we find something to mark the end of the paragraph - for i < len(data) { - // mark the beginning of the current line - prev = line - current := data[i:] - line = i - - // did we find a reference or a footnote? If so, end a paragraph - // preceding it and report that we have consumed up to the end of that - // reference: - if refEnd := isReference(p, current, tabSize); refEnd > 0 { - p.renderParagraph(data[:i]) - return i + refEnd - } - - // did we find a blank line marking the end of the paragraph? - if n := p.isEmpty(current); n > 0 { - // did this blank line followed by a definition list item? 
-			if p.extensions&DefinitionLists != 0 {
-				if i < len(data)-1 && data[i+1] == ':' {
-					return p.list(data[prev:], ListTypeDefinition)
-				}
-			}
-
-			p.renderParagraph(data[:i])
-			return i + n
-		}
-
-		// an underline under some text marks a heading, so our paragraph ended on prev line
-		if i > 0 {
-			if level := p.isUnderlinedHeading(current); level > 0 {
-				// render the paragraph
-				p.renderParagraph(data[:prev])
-
-				// ignore leading and trailing whitespace
-				eol := i - 1
-				for prev < eol && data[prev] == ' ' {
-					prev++
-				}
-				for eol > prev && data[eol-1] == ' ' {
-					eol--
-				}
-
-				id := ""
-				if p.extensions&AutoHeadingIDs != 0 {
-					id = SanitizedAnchorName(string(data[prev:eol]))
-				}
-
-				block := p.addBlock(Heading, data[prev:eol])
-				block.Level = level
-				block.HeadingID = id
-
-				// find the end of the underline
-				for i < len(data) && data[i] != '\n' {
-					i++
-				}
-				return i
-			}
-		}
-
-		// if the next line starts a block of HTML, then the paragraph ends here
-		if p.extensions&LaxHTMLBlocks != 0 {
-			if data[i] == '<' && p.html(current, false) > 0 {
-				// rewind to before the HTML block
-				p.renderParagraph(data[:i])
-				return i
-			}
-		}
-
-		// if there's a prefixed heading or a horizontal rule after this, paragraph is over
-		if p.isPrefixHeading(current) || p.isHRule(current) {
-			p.renderParagraph(data[:i])
-			return i
-		}
-
-		// if there's a definition list item, prev line is a definition term
-		if p.extensions&DefinitionLists != 0 {
-			if p.dliPrefix(current) != 0 {
-				ret := p.list(data[prev:], ListTypeDefinition)
-				return ret
-			}
-		}
-
-		// if there's a fenced code block, paragraph is over
-		if p.extensions&FencedCode != 0 {
-			if p.fencedCodeBlock(current, false) > 0 {
-				p.renderParagraph(data[:i])
-				return i
-			}
-		}
-
-		// if there's a list after this, paragraph is over
-		if p.extensions&NoEmptyLineBeforeBlock != 0 {
-			if p.uliPrefix(current) != 0 ||
-				p.oliPrefix(current) != 0 ||
-				p.quotePrefix(current) != 0 ||
-				p.codePrefix(current) != 0 {
-				p.renderParagraph(data[:i])
-				return i
-			}
-		}
-
-		// otherwise, scan to the beginning of the next line
-		nl := bytes.IndexByte(data[i:], '\n')
-		if nl >= 0 {
-			i += nl + 1
-		} else {
-			i += len(data[i:])
-		}
-	}
-
-	p.renderParagraph(data[:i])
-	return i
-}
-
-func skipChar(data []byte, start int, char byte) int {
-	i := start
-	for i < len(data) && data[i] == char {
-		i++
-	}
-	return i
-}
-
-func skipUntilChar(text []byte, start int, char byte) int {
-	i := start
-	for i < len(text) && text[i] != char {
-		i++
-	}
-	return i
-}
-
-// SanitizedAnchorName returns a sanitized anchor name for the given text.
-//
-// It implements the algorithm specified in the package comment.
-func SanitizedAnchorName(text string) string {
-	var anchorName []rune
-	futureDash := false
-	for _, r := range text {
-		switch {
-		case unicode.IsLetter(r) || unicode.IsNumber(r):
-			if futureDash && len(anchorName) > 0 {
-				anchorName = append(anchorName, '-')
-			}
-			futureDash = false
-			anchorName = append(anchorName, unicode.ToLower(r))
-		default:
-			futureDash = true
-		}
-	}
-	return string(anchorName)
-}
diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go
deleted file mode 100644
index 57ff152a05..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/doc.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Package blackfriday is a markdown processor.
-//
-// It translates plain text with simple formatting rules into an AST, which can
-// then be further processed to HTML (provided by Blackfriday itself) or other
-// formats (provided by the community).
-//
-// The simplest way to invoke Blackfriday is to call the Run function. It will
-// take a text input and produce a text output in HTML (or other format).
-//
-// A slightly more sophisticated way to use Blackfriday is to create a Markdown
-// processor and to call Parse, which returns a syntax tree for the input
-// document. You can leverage Blackfriday's parsing for content extraction from
-// markdown documents. You can assign a custom renderer and set various options
-// to the Markdown processor.
-//
-// If you're interested in calling Blackfriday from command line, see
-// https://github.com/russross/blackfriday-tool.
-//
-// Sanitized Anchor Names
-//
-// Blackfriday includes an algorithm for creating sanitized anchor names
-// corresponding to a given input text. This algorithm is used to create
-// anchors for headings when AutoHeadingIDs extension is enabled. The
-// algorithm is specified below, so that other packages can create
-// compatible anchor names and links to those anchors.
-//
-// The algorithm iterates over the input text, interpreted as UTF-8,
-// one Unicode code point (rune) at a time. All runes that are letters (category L)
-// or numbers (category N) are considered valid characters. They are mapped to
-// lower case, and included in the output. All other runes are considered
-// invalid characters. Invalid characters that precede the first valid character,
-// as well as invalid characters that follow the last valid character,
-// are dropped completely. All other sequences of invalid characters
-// between two valid characters are replaced with a single dash character '-'.
-//
-// SanitizedAnchorName exposes this functionality, and can be used to
-// create compatible links to the anchor names generated by blackfriday.
-// This algorithm is also implemented in a small standalone package at
-// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
-// that want a small package and don't need full functionality of blackfriday.
-package blackfriday

-// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
-//       github.com/shurcooL/sanitized_anchor_name.
-//       Otherwise, users of sanitized_anchor_name will get anchor names
-//       that are incompatible with those generated by blackfriday.
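For reference, the algorithm specified in the package comment above is exposed as `SanitizedAnchorName`. A minimal sketch of exercising it as a consumer — the import path assumes the upstream module `github.com/russross/blackfriday/v2`, since this vendored copy is being removed:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Letters and numbers are lower-cased and kept; each run of other runes
	// collapses to a single '-', and leading/trailing invalid runes are
	// dropped, per the algorithm in the doc comment above.
	fmt.Println(blackfriday.SanitizedAnchorName("Hello, World!")) // hello-world
	fmt.Println(blackfriday.SanitizedAnchorName("  2. Étude #1 ")) // 2-étude-1
}
```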
diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go
deleted file mode 100644
index a2c3edb691..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/entities.go
+++ /dev/null
@@ -1,2236 +0,0 @@
-package blackfriday
-
-// Extracted from https://html.spec.whatwg.org/multipage/entities.json
-var entities = map[string]bool{
-	// [~2,200 named character reference keys ("&AElig;", "&amp;", "&copy;", ...)
-	// elided: the keys were decoded to bare glyphs during extraction and the
-	// original entity names are not recoverable from this hunk]
-}
diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go
deleted file mode 100644
index 6ab60102c9..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/esc.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package blackfriday
-
-import (
-	"html"
-	"io"
-)
-
-var htmlEscaper = [256][]byte{
-	'&': []byte("&amp;"),
-	'<': []byte("&lt;"),
-	'>': []byte("&gt;"),
-	'"': []byte("&quot;"),
-}
-
-func escapeHTML(w io.Writer, s []byte) {
-	escapeEntities(w, s, false)
-}
-
-func escapeAllHTML(w io.Writer, s []byte) {
-	escapeEntities(w, s, true)
-}
-
-func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) {
-	var start, end int
-	for end < len(s) {
-		escSeq := htmlEscaper[s[end]]
-		if escSeq != nil {
-			isEntity, entityEnd := nodeIsEntity(s, end)
-			if isEntity && !escapeValidEntities {
-				w.Write(s[start : entityEnd+1])
-				start = entityEnd + 1
-			} else {
-				w.Write(s[start:end])
-				w.Write(escSeq)
-				start = end + 1
-			}
-		}
-		end++
-	}
-	if start < len(s) && end <= len(s) {
-		w.Write(s[start:end])
-	}
-}
-
-func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) {
-	isEntity = false
-	endEntityPos = end + 1
-
-	if s[end] == '&' {
-		for endEntityPos < len(s) {
-			if s[endEntityPos] == ';' {
-				if entities[string(s[end:endEntityPos+1])] {
-					isEntity = true
-					break
-				}
-			}
-			if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' {
-				break
-			}
-			endEntityPos++
-		}
-	}
-
-	return isEntity, endEntityPos
-}
-
-func escLink(w io.Writer, text []byte) {
-	unesc := html.UnescapeString(string(text))
-	escapeHTML(w, []byte(unesc))
-}
diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go
deleted file mode 100644
index cb4f26e30f..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/html.go
+++ /dev/null
@@ -1,952 +0,0 @@
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross <russ@russross.com>.
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-//
-
-//
-//
-// HTML rendering backend
-//
-//
-
-package blackfriday
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"regexp"
-	"strings"
-)
-
-// HTMLFlags control optional behavior of HTML renderer.
-type HTMLFlags int
-
-// HTML renderer configuration options.
-const (
-	HTMLFlagsNone           HTMLFlags = 0
-	SkipHTML                HTMLFlags = 1 << iota // Skip preformatted HTML blocks
-	SkipImages                                    // Skip embedded images
-	SkipLinks                                     // Skip all links
-	Safelink                                      // Only link to trusted protocols
-	NofollowLinks                                 // Only link with rel="nofollow"
-	NoreferrerLinks                               // Only link with rel="noreferrer"
-	NoopenerLinks                                 // Only link with rel="noopener"
-	HrefTargetBlank                               // Add a blank target
-	CompletePage                                  // Generate a complete HTML page
-	UseXHTML                                      // Generate XHTML output instead of HTML
-	FootnoteReturnLinks                           // Generate a link at the end of a footnote to return to the source
-	Smartypants                                   // Enable smart punctuation substitutions
-	SmartypantsFractions                          // Enable smart fractions (with Smartypants)
-	SmartypantsDashes                             // Enable smart dashes (with Smartypants)
-	SmartypantsLatexDashes                        // Enable LaTeX-style dashes (with Smartypants)
-	SmartypantsAngledQuotes                       // Enable angled double quotes (with Smartypants) for double quotes rendering
-	SmartypantsQuotesNBSP                         // Enable « French guillemets » (with Smartypants)
-	TOC                                           // Generate a table of contents
-)
-
-var (
-	htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
-)
-
-const (
-	htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
-		processingInstruction + "|" + declaration + "|" + cdata + ")"
-	closeTag              = "</" + tagName + "\\s*[>]"
-	openTag               = "<" + tagName + attribute + "*" + "\\s*/?>"
-	attribute             = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
-	attributeValue        = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
-	attributeValueSpec    = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
-	attributeName         = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
-	cdata                 = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
-	declaration           = "<![A-Z]+" + "\\s+[^>]*>"
-	doubleQuotedValue     = "\"[^\"]*\""
-	htmlComment           = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
-	processingInstruction = "[<][?].*?[?][>]"
-	singleQuotedValue     = "'[^']*'"
-	tagName               = "[A-Za-z][A-Za-z0-9-]*"
-	unquotedValue         = "[^\"'=<>`\\x00-\\x20]+"
-)
-
-// HTMLRendererParameters is a collection of supplementary parameters tweaking
-// the behavior of various parts of HTML renderer.
-type HTMLRendererParameters struct {
-	// Prepend this text to each relative URL.
-	AbsolutePrefix string
-	// Add this text to each footnote anchor, to ensure uniqueness.
-	FootnoteAnchorPrefix string
-	// Show this text inside the <a> tag for a footnote return link, if the
-	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
-	// <sup>[return]</sup> is used.
-	FootnoteReturnLinkContents string
-	// If set, add this text to the front of each Heading ID, to ensure
-	// uniqueness.
-	HeadingIDPrefix string
-	// If set, add this text to the back of each Heading ID, to ensure uniqueness.
-	HeadingIDSuffix string
-	// Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
-	// Negative offset is also valid.
-	// Resulting levels are clipped between 1 and 6.
-	HeadingLevelOffset int
-
-	Title string // Document title (used if CompletePage is set)
-	CSS   string // Optional CSS file URL (used if CompletePage is set)
-	Icon  string // Optional icon file URL (used if CompletePage is set)
-
-	Flags HTMLFlags // Flags allow customizing this renderer's behavior
-}
-
-// HTMLRenderer is a type that implements the Renderer interface for HTML output.
-//
-// Do not create this directly, instead use the NewHTMLRenderer function.
-type HTMLRenderer struct {
-	HTMLRendererParameters
-
-	closeTag string // how to end singleton tags: either " />" or ">"
-
-	// Track heading IDs to prevent ID collision in a single generation.
-	headingIDs map[string]int
-
-	lastOutputLen int
-	disableTags   int
-
-	sr *SPRenderer
-}
-
-const (
-	xhtmlClose = " />"
-	htmlClose  = ">"
-)
-
-// NewHTMLRenderer creates and configures an HTMLRenderer object, which
-// satisfies the Renderer interface.
-func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
-	// configure the rendering engine
-	closeTag := htmlClose
-	if params.Flags&UseXHTML != 0 {
-		closeTag = xhtmlClose
-	}
-
-	if params.FootnoteReturnLinkContents == "" {
-		// U+FE0E is VARIATION SELECTOR-15.
-		// It suppresses automatic emoji presentation of the preceding
-		// U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS.
-		params.FootnoteReturnLinkContents = "↩\ufe0e"
-	}
-
-	return &HTMLRenderer{
-		HTMLRendererParameters: params,
-
-		closeTag:   closeTag,
-		headingIDs: make(map[string]int),
-
-		sr: NewSmartypantsRenderer(params.Flags),
-	}
-}
-
-func isHTMLTag(tag []byte, tagname string) bool {
-	found, _ := findHTMLTagPos(tag, tagname)
-	return found
-}
-
-// Look for a character, but ignore it when it's in any kind of quotes, it
-// might be JavaScript
-func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
-	inSingleQuote := false
-	inDoubleQuote := false
-	inGraveQuote := false
-	i := start
-	for i < len(html) {
-		switch {
-		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
-			return i
-		case html[i] == '\'':
-			inSingleQuote = !inSingleQuote
-		case html[i] == '"':
-			inDoubleQuote = !inDoubleQuote
-		case html[i] == '`':
-			inGraveQuote = !inGraveQuote
-		}
-		i++
-	}
-	return start
-}
-
-func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
-	i := 0
-	if i < len(tag) && tag[0] != '<' {
-		return false, -1
-	}
-	i++
-	i = skipSpace(tag, i)
-
-	if i < len(tag) && tag[i] == '/' {
-		i++
-	}
-
-	i = skipSpace(tag, i)
-	j := 0
-	for ; i < len(tag); i, j = i+1, j+1 {
-		if j >= len(tagname) {
-			break
-		}
-
-		if strings.ToLower(string(tag[i]))[0] != tagname[j] {
-			return false, -1
-		}
-	}
-
-	if i == len(tag) {
-		return false, -1
-	}
-
-	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
-	if rightAngle >= i {
-		return true, rightAngle
-	}
-
-	return false, -1
-}
-
-func skipSpace(tag []byte, i int) int {
-	for i < len(tag) && isspace(tag[i]) {
-		i++
-	}
-	return i
-}
-
-func isRelativeLink(link []byte) (yes bool) {
-	// a tag begin with '#'
-	if link[0] == '#' {
-		return true
-	}
-
-	// link begin with '/' but not '//', the second maybe a protocol relative link
-	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
-		return true
-	}
-
-	// only the root '/'
-	if len(link) == 1 && link[0] == '/' {
-		return true
-	}
-
-	// current directory : begin with "./"
-	if bytes.HasPrefix(link, []byte("./")) {
-		return true
-	}
-
-	// parent directory : begin with "../"
-	if bytes.HasPrefix(link, []byte("../")) {
-		return true
-	}
-
-	return false
-}
-
-func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
-	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
-		tmp := fmt.Sprintf("%s-%d", id, count+1)
-
-		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
-			r.headingIDs[id] = count + 1
-			id = tmp
-		} else {
-			id = id + "-1"
-		}
-	}
-
-	if _, found := r.headingIDs[id]; !found {
-		r.headingIDs[id] = 0
-	}
-
-	return id
-}
-
-func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
-	if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
-		newDest := r.AbsolutePrefix
-		if link[0] != '/' {
-			newDest += "/"
-		}
-		newDest += string(link)
-		return []byte(newDest)
-	}
-	return link
-}
-
-func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
-	if isRelativeLink(link) {
-		return attrs
-	}
-	val := []string{}
-	if flags&NofollowLinks != 0 {
-		val = append(val, "nofollow")
-	}
-	if flags&NoreferrerLinks != 0 {
-		val = append(val, "noreferrer")
-	}
-	if flags&NoopenerLinks != 0 {
-		val = append(val, "noopener")
-	}
-	if flags&HrefTargetBlank != 0 {
-		attrs = append(attrs, "target=\"_blank\"")
-	}
-	if len(val) == 0 {
-		return attrs
-	}
-	attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
-	return append(attrs, attr)
-}
-
-func isMailto(link []byte) bool {
-	return bytes.HasPrefix(link, []byte("mailto:"))
-}
-
-func needSkipLink(flags HTMLFlags, dest []byte) bool {
-	if flags&SkipLinks != 0 {
-		return true
-	}
-	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
-}
-
-func isSmartypantable(node *Node) bool {
-	pt := node.Parent.Type
-	return pt != Link && pt != CodeBlock && pt != Code
-}
-
-func appendLanguageAttr(attrs []string, info []byte) []string {
-	if len(info) == 0 {
-		return attrs
-	}
-	endOfLang := bytes.IndexAny(info, "\t ")
-	if endOfLang < 0 {
-		endOfLang = len(info)
-	}
-	return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
-}
-
-func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
-	w.Write(name)
-	if len(attrs) > 0 {
-		w.Write(spaceBytes)
-		w.Write([]byte(strings.Join(attrs, " ")))
-	}
-	w.Write(gtBytes)
-	r.lastOutputLen = 1
-}
-
-func footnoteRef(prefix string, node *Node) []byte {
-	urlFrag := prefix + string(slugify(node.Destination))
-	anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
-	return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
-}
-
-func footnoteItem(prefix string, slug []byte) []byte {
-	return []byte(fmt.Sprintf(`<li id="%sfn:%s">`, prefix, slug))
-}
-
-func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
-	const format = ` <a class="footnote-return" href="#%sfnref:%s">%s</a>`
-	return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
-}
-
-func itemOpenCR(node *Node) bool {
-	if node.Prev == nil {
-		return false
-	}
-	ld := node.Parent.ListData
-	return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
-}
-
-func skipParagraphTags(node *Node) bool {
-	grandparent := node.Parent.Parent
-	if grandparent == nil || grandparent.Type != List {
-		return false
-	}
-	tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
-	return grandparent.Type == List && tightOrTerm
-}
-
-func cellAlignment(align CellAlignFlags) string {
-	switch align {
-	case TableAlignmentLeft:
-		return "left"
-	case TableAlignmentRight:
-		return "right"
-	case TableAlignmentCenter:
-		return "center"
-	default:
-		return ""
-	}
-}
-
-func (r *HTMLRenderer) out(w io.Writer, text []byte) {
-	if r.disableTags > 0 {
-		w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
-	} else {
-		w.Write(text)
-	}
-	r.lastOutputLen = len(text)
-}
-
-func (r *HTMLRenderer) cr(w io.Writer) {
-	if r.lastOutputLen > 0 {
-		r.out(w, nlBytes)
-	}
-}
-
-var (
-	nlBytes    = []byte{'\n'}
-	gtBytes    = []byte{'>'}
-	spaceBytes = []byte{' '}
-)
-
-var (
-	brTag              = []byte("<br>")
-	brXHTMLTag         = []byte("<br />")
-	emTag              = []byte("<em>")
-	emCloseTag         = []byte("</em>")
-	strongTag          = []byte("<strong>")
-	strongCloseTag     = []byte("</strong>")
-	delTag             = []byte("<del>")
-	delCloseTag        = []byte("</del>")
-	ttTag              = []byte("<tt>")
-	ttCloseTag         = []byte("</tt>")
-	aTag               = []byte("<a")
-	aCloseTag          = []byte("</a>")
-	preTag             = []byte("<pre>")
-	preCloseTag        = []byte("</pre>")
-	codeTag            = []byte("<code>")
-	codeCloseTag       = []byte("</code>")
-	pTag               = []byte("<p>")
-	pCloseTag          = []byte("</p>")
-	blockquoteTag      = []byte("<blockquote>")
-	blockquoteCloseTag = []byte("</blockquote>")
-	hrTag              = []byte("<hr>")
-	hrXHTMLTag         = []byte("<hr />")
-	ulTag              = []byte("<ul>")
-	ulCloseTag         = []byte("</ul>")
-	olTag              = []byte("<ol>")
-	olCloseTag         = []byte("</ol>")
-	dlTag              = []byte("<dl>")
-	dlCloseTag         = []byte("</dl>")
-	liTag              = []byte("<li>")
-	liCloseTag         = []byte("</li>")
-	ddTag              = []byte("<dd>")
-	ddCloseTag         = []byte("</dd>")
-	dtTag              = []byte("<dt>")
-	dtCloseTag         = []byte("</dt>")
-	tableTag           = []byte("<table>")
-	tableCloseTag      = []byte("</table>")
-	tdTag              = []byte("<td")
-	tdCloseTag         = []byte("</td>")
-	thTag              = []byte("<th")
-	thCloseTag         = []byte("</th>")
-	theadTag           = []byte("<thead>")
-	theadCloseTag      = []byte("</thead>")
-	tbodyTag           = []byte("<tbody>")
-	tbodyCloseTag      = []byte("</tbody>")
-	trTag              = []byte("<tr>")
-	trCloseTag         = []byte("</tr>")
-	h1Tag              = []byte("<h1")
-	h1CloseTag         = []byte("</h1>")
-	h2Tag              = []byte("<h2")
-	h2CloseTag         = []byte("</h2>")
-	h3Tag              = []byte("<h3")
-	h3CloseTag         = []byte("</h3>")
-	h4Tag              = []byte("<h4")
-	h4CloseTag         = []byte("</h4>")
-	h5Tag              = []byte("<h5")
-	h5CloseTag         = []byte("</h5>")
-	h6Tag              = []byte("<h6")
-	h6CloseTag         = []byte("</h6>")
-
-	footnotesDivBytes      = []byte("\n<div class=\"footnotes\">\n\n")
-	footnotesCloseDivBytes = []byte("\n</div>\n")
-)
-
-func headingTagsFromLevel(level int) ([]byte, []byte) {
-	if level <= 1 {
-		return h1Tag, h1CloseTag
-	}
-	switch level {
-	case 2:
-		return h2Tag, h2CloseTag
-	case 3:
-		return h3Tag, h3CloseTag
-	case 4:
-		return h4Tag, h4CloseTag
-	case 5:
-		return h5Tag, h5CloseTag
-	}
-	return h6Tag, h6CloseTag
-}
-
-func (r *HTMLRenderer) outHRTag(w io.Writer) {
-	if r.Flags&UseXHTML == 0 {
-		r.out(w, hrTag)
-	} else {
-		r.out(w, hrXHTMLTag)
-	}
-}
-
-// RenderNode is a default renderer of a single node of a syntax tree. For
-// block nodes it will be called twice: first time with entering=true, second
-// time with entering=false, so that it could know when it's working on an open
-// tag and when on close. It writes the result to w.
-//
-// The return value is a way to tell the calling walker to adjust its walk
-// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
-// can ask the walker to skip a subtree of this node by returning SkipChildren.
-// The typical behavior is to return GoToNext, which asks for the usual
-// traversal to the next node.
-func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
-	attrs := []string{}
-	switch node.Type {
-	case Text:
-		if r.Flags&Smartypants != 0 {
-			var tmp bytes.Buffer
-			escapeHTML(&tmp, node.Literal)
-			r.sr.Process(w, tmp.Bytes())
-		} else {
-			if node.Parent.Type == Link {
-				escLink(w, node.Literal)
-			} else {
-				escapeHTML(w, node.Literal)
-			}
-		}
-	case Softbreak:
-		r.cr(w)
-		// TODO: make it configurable via out(renderer.softbreak)
-	case Hardbreak:
-		if r.Flags&UseXHTML == 0 {
-			r.out(w, brTag)
-		} else {
-			r.out(w, brXHTMLTag)
-		}
-		r.cr(w)
-	case Emph:
-		if entering {
-			r.out(w, emTag)
-		} else {
-			r.out(w, emCloseTag)
-		}
-	case Strong:
-		if entering {
-			r.out(w, strongTag)
-		} else {
-			r.out(w, strongCloseTag)
-		}
-	case Del:
-		if entering {
-			r.out(w, delTag)
-		} else {
-			r.out(w, delCloseTag)
-		}
-	case HTMLSpan:
-		if r.Flags&SkipHTML != 0 {
-			break
-		}
-		r.out(w, node.Literal)
-	case Link:
-		// mark it but don't link it if it is not a safe link: no smartypants
-		dest := node.LinkData.Destination
-		if needSkipLink(r.Flags, dest) {
-			if entering {
-				r.out(w, ttTag)
-			} else {
-				r.out(w, ttCloseTag)
-			}
-		} else {
-			if entering {
-				dest = r.addAbsPrefix(dest)
-				var hrefBuf bytes.Buffer
-				hrefBuf.WriteString("href=\"")
-				escLink(&hrefBuf, dest)
-				hrefBuf.WriteByte('"')
-				attrs = append(attrs, hrefBuf.String())
-				if node.NoteID != 0 {
-					r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
-					break
-				}
-				attrs = appendLinkAttrs(attrs, r.Flags, dest)
-				if len(node.LinkData.Title) > 0 {
-					var titleBuff bytes.Buffer
-					titleBuff.WriteString("title=\"")
-					escapeHTML(&titleBuff, node.LinkData.Title)
-					titleBuff.WriteByte('"')
-					attrs = append(attrs, titleBuff.String())
-				}
-				r.tag(w, aTag, attrs)
-			} else {
-				if node.NoteID != 0 {
-					break
-				}
-				r.out(w, aCloseTag)
-			}
-		}
-	case Image:
-		if r.Flags&SkipImages != 0 {
-			return SkipChildren
-		}
-		if entering {
-			dest := node.LinkData.Destination
-			dest = r.addAbsPrefix(dest)
-			if r.disableTags == 0 {
-				//if options.safe && potentiallyUnsafe(dest) {
-				//out(w, []byte(`<img src="" alt="`))
-				//} else {
-				r.out(w, []byte(`<img src="`))
-				escLink(w, dest)
-				r.out(w, []byte(`" alt="`))
-				//}
-			}
-			r.disableTags++
-		} else {
-			r.disableTags--
-			if r.disableTags == 0 {
-				if node.LinkData.Title != nil {
-					r.out(w, []byte(`" title="`))
-					escapeHTML(w, node.LinkData.Title)
-				}
-				r.out(w, []byte(`" />`))
-			}
-		}
-	case Code:
-		r.out(w, codeTag)
-		escapeAllHTML(w, node.Literal)
-		r.out(w, codeCloseTag)
-	case Document:
-		break
-	case Paragraph:
-		if skipParagraphTags(node) {
-			break
-		}
-		if entering {
-			// TODO: untangle this clusterfuck about when the newlines need
-			// to be added and when not.
-			if node.Prev != nil {
-				switch node.Prev.Type {
-				case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
-					r.cr(w)
-				}
-			}
-			if node.Parent.Type == BlockQuote && node.Prev == nil {
-				r.cr(w)
-			}
-			r.out(w, pTag)
-		} else {
-			r.out(w, pCloseTag)
-			if !(node.Parent.Type == Item && node.Next == nil) {
-				r.cr(w)
-			}
-		}
-	case BlockQuote:
-		if entering {
-			r.cr(w)
-			r.out(w, blockquoteTag)
-		} else {
-			r.out(w, blockquoteCloseTag)
-			r.cr(w)
-		}
-	case HTMLBlock:
-		if r.Flags&SkipHTML != 0 {
-			break
-		}
-		r.cr(w)
-		r.out(w, node.Literal)
-		r.cr(w)
-	case Heading:
-		headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level
-		openTag, closeTag := headingTagsFromLevel(headingLevel)
-		if entering {
-			if node.IsTitleblock {
-				attrs = append(attrs, `class="title"`)
-			}
-			if node.HeadingID != "" {
-				id := r.ensureUniqueHeadingID(node.HeadingID)
-				if r.HeadingIDPrefix != "" {
-					id = r.HeadingIDPrefix + id
-				}
-				if r.HeadingIDSuffix != "" {
-					id = id + r.HeadingIDSuffix
-				}
-				attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
-			}
-			r.cr(w)
-			r.tag(w, openTag, attrs)
-		} else {
-			r.out(w, closeTag)
-			if !(node.Parent.Type == Item && node.Next == nil) {
-				r.cr(w)
-			}
-		}
-	case HorizontalRule:
-		r.cr(w)
-		r.outHRTag(w)
-		r.cr(w)
-	case List:
-		openTag := ulTag
-		closeTag := ulCloseTag
-		if node.ListFlags&ListTypeOrdered != 0 {
-			openTag = olTag
-			closeTag = olCloseTag
-		}
-		if node.ListFlags&ListTypeDefinition != 0 {
-			openTag = dlTag
-			closeTag = dlCloseTag
-		}
-		if entering {
-			if node.IsFootnotesList {
-				r.out(w, footnotesDivBytes)
-				r.outHRTag(w)
-				r.cr(w)
-			}
-			r.cr(w)
-			if node.Parent.Type == Item && node.Parent.Parent.Tight {
-				r.cr(w)
-			}
-			r.tag(w, openTag[:len(openTag)-1], attrs)
-			r.cr(w)
-		} else {
-			r.out(w, closeTag)
-			//cr(w)
-			//if node.parent.Type != Item {
-			//	cr(w)
-			//}
-			if node.Parent.Type == Item && node.Next != nil {
-				r.cr(w)
-			}
-			if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
-				r.cr(w)
-			}
-			if node.IsFootnotesList {
-				r.out(w, footnotesCloseDivBytes)
-			}
-		}
-	case Item:
-		openTag := liTag
-		closeTag := liCloseTag
-		if node.ListFlags&ListTypeDefinition != 0 {
-			openTag = ddTag
-			closeTag = ddCloseTag
-		}
-		if node.ListFlags&ListTypeTerm != 0 {
-			openTag = dtTag
-			closeTag = dtCloseTag
-		}
-		if entering {
-			if itemOpenCR(node) {
-				r.cr(w)
-			}
-			if node.ListData.RefLink != nil {
-				slug := slugify(node.ListData.RefLink)
-				r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
-				break
-			}
-			r.out(w, openTag)
-		} else {
-			if node.ListData.RefLink != nil {
-				slug := slugify(node.ListData.RefLink)
-				if r.Flags&FootnoteReturnLinks != 0 {
-					r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
-				}
-			}
-			r.out(w, closeTag)
-			r.cr(w)
-		}
-	case CodeBlock:
-		attrs = appendLanguageAttr(attrs, node.Info)
-		r.cr(w)
-		r.out(w, preTag)
-		r.tag(w, codeTag[:len(codeTag)-1], attrs)
-		escapeAllHTML(w, node.Literal)
-		r.out(w, codeCloseTag)
-		r.out(w, preCloseTag)
-		if node.Parent.Type != Item {
-			r.cr(w)
-		}
-	case Table:
-		if entering {
-			r.cr(w)
-			r.out(w, tableTag)
-		} else {
-			r.out(w, tableCloseTag)
-			r.cr(w)
-		}
-	case TableCell:
-		openTag := tdTag
-		closeTag := tdCloseTag
-		if node.IsHeader {
-			openTag = thTag
-			closeTag = thCloseTag
-		}
-		if entering {
-			align := cellAlignment(node.Align)
-			if align != "" {
-				attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
-			}
-			if node.Prev == nil {
-				r.cr(w)
-			}
-			r.tag(w, openTag, attrs)
-		} else {
-			r.out(w, closeTag)
-			r.cr(w)
-		}
-	case TableHead:
-		if entering {
-			r.cr(w)
-			r.out(w, theadTag)
-		} else {
-			r.out(w, theadCloseTag)
-			r.cr(w)
-		}
-	case TableBody:
-		if entering {
-			r.cr(w)
-			r.out(w, tbodyTag)
-			// XXX: this is to adhere to a rather silly test. Should fix test.
-			if node.FirstChild == nil {
-				r.cr(w)
-			}
-		} else {
-			r.out(w, tbodyCloseTag)
-			r.cr(w)
-		}
-	case TableRow:
-		if entering {
-			r.cr(w)
-			r.out(w, trTag)
-		} else {
-			r.out(w, trCloseTag)
-			r.cr(w)
-		}
-	default:
-		panic("Unknown node type " + node.Type.String())
-	}
-	return GoToNext
-}
-
-// RenderHeader writes HTML document preamble and TOC if requested.
-func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
-	r.writeDocumentHeader(w)
-	if r.Flags&TOC != 0 {
-		r.writeTOC(w, ast)
-	}
-}
-
-// RenderFooter writes HTML document footer.
-func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
-	if r.Flags&CompletePage == 0 {
-		return
-	}
-	io.WriteString(w, "\n</body>\n</html>\n")
-}
-
-func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
-	if r.Flags&CompletePage == 0 {
-		return
-	}
-	ending := ""
-	if r.Flags&UseXHTML != 0 {
-		io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
-		io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
-		io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
-		ending = " /"
-	} else {
-		io.WriteString(w, "<!DOCTYPE html>\n")
-		io.WriteString(w, "<html>\n")
-	}
-	io.WriteString(w, "<head>\n")
-	io.WriteString(w, "  <title>")
-	if r.Flags&Smartypants != 0 {
-		r.sr.Process(w, []byte(r.Title))
-	} else {
-		escapeHTML(w, []byte(r.Title))
-	}
-	io.WriteString(w, "</title>\n")
-	io.WriteString(w, "  <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
-	io.WriteString(w, Version)
-	io.WriteString(w, "\"")
-	io.WriteString(w, ending)
-	io.WriteString(w, ">\n")
-	io.WriteString(w, "  <meta charset=\"utf-8\"")
-	io.WriteString(w, ending)
-	io.WriteString(w, ">\n")
-	if r.CSS != "" {
-		io.WriteString(w, "  <link rel=\"stylesheet\" type=\"text/css\" href=\"")
-		escapeHTML(w, []byte(r.CSS))
-		io.WriteString(w, "\"")
-		io.WriteString(w, ending)
-		io.WriteString(w, ">\n")
-	}
-	if r.Icon != "" {
-		io.WriteString(w, "  <link rel=\"icon\" type=\"image/x-icon\" href=\"")
-		escapeHTML(w, []byte(r.Icon))
-		io.WriteString(w, "\"")
-		io.WriteString(w, ending)
-		io.WriteString(w, ">\n")
-	}
-	io.WriteString(w, "</head>\n")
-	io.WriteString(w, "<body>\n\n")
-}
-
-func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) {
-	buf := bytes.Buffer{}
-
-	inHeading := false
-	tocLevel := 0
-	headingCount := 0
-
-	ast.Walk(func(node *Node, entering bool) WalkStatus {
-		if node.Type == Heading && !node.HeadingData.IsTitleblock {
-			inHeading = entering
-			if entering {
-				node.HeadingID = fmt.Sprintf("toc_%d", headingCount)
-				if node.Level == tocLevel {
-					buf.WriteString("</li>\n\n<li>")
-				} else if node.Level < tocLevel {
-					for node.Level < tocLevel {
-						tocLevel--
-						buf.WriteString("</li>\n</ul>")
-					}
-					buf.WriteString("</li>\n\n<li>")
-				} else {
-					for node.Level > tocLevel {
-						tocLevel++
-						buf.WriteString("<ul>\n<li>")
-					}
-				}
-
-				fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount)
-				headingCount++
-			} else {
-				buf.WriteString("</a>")
-			}
-			return GoToNext
-		}
-
-		if inHeading {
-			return r.RenderNode(&buf, node, entering)
-		}
-
-		return GoToNext
-	})
-
-	for ; tocLevel > 0; tocLevel-- {
-		buf.WriteString("</li>\n</ul>")
-	}
-
-	if buf.Len() > 0 {
-		io.WriteString(w, "<nav>\n")
-		w.Write(buf.Bytes())
-		io.WriteString(w, "\n\n</nav>\n")
-	}
-	r.lastOutputLen = buf.Len()
-}
- beg = end + consumed - end = beg - } - } else { - end++ - } - } - if beg < len(data) { - if data[end-1] == '\n' { - end-- - } - currBlock.AppendChild(text(data[beg:end])) - } - p.nesting-- -} - -// single and double emphasis parsing -func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - c := data[0] - - if len(data) > 2 && data[1] != c { - // whitespace cannot follow an opening emphasis; - // strikethrough only takes two characters '~~' - if c == '~' || isspace(data[1]) { - return 0, nil - } - ret, node := helperEmphasis(p, data[1:], c) - if ret == 0 { - return 0, nil - } - - return ret + 1, node - } - - if len(data) > 3 && data[1] == c && data[2] != c { - if isspace(data[2]) { - return 0, nil - } - ret, node := helperDoubleEmphasis(p, data[2:], c) - if ret == 0 { - return 0, nil - } - - return ret + 2, node - } - - if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { - if c == '~' || isspace(data[3]) { - return 0, nil - } - ret, node := helperTripleEmphasis(p, data, 3, c) - if ret == 0 { - return 0, nil - } - - return ret + 3, node - } - - return 0, nil -} - -func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - nb := 0 - - // count the number of backticks in the delimiter - for nb < len(data) && data[nb] == '`' { - nb++ - } - - // find the next delimiter - i, end := 0, 0 - for end = nb; end < len(data) && i < nb; end++ { - if data[end] == '`' { - i++ - } else { - i = 0 - } - } - - // no matching delimiter? - if i < nb && end >= len(data) { - return 0, nil - } - - // trim outside whitespace - fBegin := nb - for fBegin < end && data[fBegin] == ' ' { - fBegin++ - } - - fEnd := end - nb - for fEnd > fBegin && data[fEnd-1] == ' ' { - fEnd-- - } - - // render the code span - if fBegin != fEnd { - code := NewNode(Code) - code.Literal = data[fBegin:fEnd] - return end, code - } - - return end, nil -} - -// newline preceded by two spaces becomes
    -func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { - origOffset := offset - for offset < len(data) && data[offset] == ' ' { - offset++ - } - - if offset < len(data) && data[offset] == '\n' { - if offset-origOffset >= 2 { - return offset - origOffset + 1, NewNode(Hardbreak) - } - return offset - origOffset, nil - } - return 0, nil -} - -// newline without two spaces works when HardLineBreak is enabled -func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { - if p.extensions&HardLineBreak != 0 { - return 1, NewNode(Hardbreak) - } - return 0, nil -} - -type linkType int - -const ( - linkNormal linkType = iota - linkImg - linkDeferredFootnote - linkInlineFootnote -) - -func isReferenceStyleLink(data []byte, pos int, t linkType) bool { - if t == linkDeferredFootnote { - return false - } - return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' -} - -func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { - if offset < len(data)-1 && data[offset+1] == '[' { - return link(p, data, offset) - } - return 0, nil -} - -func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { - if offset < len(data)-1 && data[offset+1] == '[' { - return link(p, data, offset) - } - return 0, nil -} - -// '[': parse a link or an image or a footnote -func link(p *Markdown, data []byte, offset int) (int, *Node) { - // no links allowed inside regular links, footnote, and deferred footnotes - if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { - return 0, nil - } - - var t linkType - switch { - // special case: ![^text] == deferred footnote (that follows something with - // an exclamation point) - case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': - t = linkDeferredFootnote - // ![alt] == image - case offset >= 0 && data[offset] == '!': - t = linkImg - offset++ - // ^[text] == inline footnote - // [^refId] == deferred footnote - case p.extensions&Footnotes != 0: - if offset >= 0 && data[offset] == '^' { - t = linkInlineFootnote - offset++ - } else if len(data)-1 > offset && data[offset+1] == '^' { - t = linkDeferredFootnote - } - // [text] == regular link - default: - t = linkNormal - } - - data = data[offset:] - - var ( - i = 1 - noteID int - title, link, altContent []byte - textHasNl = false - ) - - if t == linkDeferredFootnote { - i++ - } - - // look for the matching closing bracket - for level := 1; level > 0 && i < len(data); i++ { - switch { - case data[i] == '\n': - textHasNl = true - - case isBackslashEscaped(data, i): - continue - - case data[i] == '[': - level++ - - case data[i] == ']': - level-- - if level <= 0 { - i-- // compensate for extra i++ in for loop - } - } - } - - if i >= len(data) { - return 0, nil - } - - txtE := i - i++ - var footnoteNode *Node - - // skip any amount of whitespace or newline - // (this is much more lax than original markdown syntax) - for i < len(data) && isspace(data[i]) { - i++ - } - - // inline style link - switch { - case i < len(data) && data[i] == '(': - // skip initial whitespace - i++ - - for i < len(data) && isspace(data[i]) { - i++ - } - - linkB := i - - // look for link end: ' " ) - findlinkend: - for i < len(data) { - switch { - case data[i] == '\\': - i += 2 - - case data[i] == ')' || data[i] == '\'' || data[i] == '"': - break findlinkend - - default: - i++ - } - } - - if i >= len(data) { - return 0, nil - } - linkE := i - - // look for title end if present - titleB, titleE := 0, 0 - if data[i] == '\'' 
|| data[i] == '"' { - i++ - titleB = i - - findtitleend: - for i < len(data) { - switch { - case data[i] == '\\': - i += 2 - - case data[i] == ')': - break findtitleend - - default: - i++ - } - } - - if i >= len(data) { - return 0, nil - } - - // skip whitespace after title - titleE = i - 1 - for titleE > titleB && isspace(data[titleE]) { - titleE-- - } - - // check for closing quote presence - if data[titleE] != '\'' && data[titleE] != '"' { - titleB, titleE = 0, 0 - linkE = i - } - } - - // remove whitespace at the end of the link - for linkE > linkB && isspace(data[linkE-1]) { - linkE-- - } - - // remove optional angle brackets around the link - if data[linkB] == '<' { - linkB++ - } - if data[linkE-1] == '>' { - linkE-- - } - - // build escaped link and title - if linkE > linkB { - link = data[linkB:linkE] - } - - if titleE > titleB { - title = data[titleB:titleE] - } - - i++ - - // reference style link - case isReferenceStyleLink(data, i, t): - var id []byte - altContentConsidered := false - - // look for the id - i++ - linkB := i - for i < len(data) && data[i] != ']' { - i++ - } - if i >= len(data) { - return 0, nil - } - linkE := i - - // find the reference - if linkB == linkE { - if textHasNl { - var b bytes.Buffer - - for j := 1; j < txtE; j++ { - switch { - case data[j] != '\n': - b.WriteByte(data[j]) - case data[j-1] != ' ': - b.WriteByte(' ') - } - } - - id = b.Bytes() - } else { - id = data[1:txtE] - altContentConsidered = true - } - } else { - id = data[linkB:linkE] - } - - // find the reference with matching id - lr, ok := p.getRef(string(id)) - if !ok { - return 0, nil - } - - // keep link and title from reference - link = lr.link - title = lr.title - if altContentConsidered { - altContent = lr.text - } - i++ - - // shortcut reference style link or reference or inline footnote - default: - var id []byte - - // craft the id - if textHasNl { - var b bytes.Buffer - - for j := 1; j < txtE; j++ { - switch { - case data[j] != '\n': - b.WriteByte(data[j]) - case data[j-1] != ' ': - b.WriteByte(' ') - } - } - - id = b.Bytes() - } else { - if t == linkDeferredFootnote { - id = data[2:txtE] // get rid of the ^ - } else { - id = data[1:txtE] - } - } - - footnoteNode = NewNode(Item) - if t == linkInlineFootnote { - // create a new reference - noteID = len(p.notes) + 1 - - var fragment []byte - if len(id) > 0 { - if len(id) < 16 { - fragment = make([]byte, len(id)) - } else { - fragment = make([]byte, 16) - } - copy(fragment, slugify(id)) - } else { - fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
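The link() parser above handles inline links, images, and footnote references from the same `[` entry point. A quick sketch of the inline-link and image syntaxes it accepts, exercised through Run; the URLs and text are invented:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// An inline-style link with an optional quoted title, plus an image;
	// both are parsed by the '[' callback shown above.
	input := []byte(`See [the docs](https://example.com/ "tooltip") and ![alt text](https://example.com/img.png).`)
	fmt.Println(string(blackfriday.Run(input)))
	// Expected shape: <p>See <a href="..." title="tooltip">the docs</a>
	// and <img src="..." alt="alt text" />.</p>
}
```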
- } - - ref := &reference{ - noteID: noteID, - hasBlock: false, - link: fragment, - title: id, - footnote: footnoteNode, - } - - p.notes = append(p.notes, ref) - - link = ref.link - title = ref.title - } else { - // find the reference with matching id - lr, ok := p.getRef(string(id)) - if !ok { - return 0, nil - } - - if t == linkDeferredFootnote { - lr.noteID = len(p.notes) + 1 - lr.footnote = footnoteNode - p.notes = append(p.notes, lr) - } - - // keep link and title from reference - link = lr.link - // if inline footnote, title == footnote contents - title = lr.title - noteID = lr.noteID - } - - // rewind the whitespace - i = txtE + 1 - } - - var uLink []byte - if t == linkNormal || t == linkImg { - if len(link) > 0 { - var uLinkBuf bytes.Buffer - unescapeText(&uLinkBuf, link) - uLink = uLinkBuf.Bytes() - } - - // links need something to click on and somewhere to go - if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { - return 0, nil - } - } - - // call the relevant rendering function - var linkNode *Node - switch t { - case linkNormal: - linkNode = NewNode(Link) - linkNode.Destination = normalizeURI(uLink) - linkNode.Title = title - if len(altContent) > 0 { - linkNode.AppendChild(text(altContent)) - } else { - // links cannot contain other links, so turn off link parsing - // temporarily and recurse - insideLink := p.insideLink - p.insideLink = true - p.inline(linkNode, data[1:txtE]) - p.insideLink = insideLink - } - - case linkImg: - linkNode = NewNode(Image) - linkNode.Destination = uLink - linkNode.Title = title - linkNode.AppendChild(text(data[1:txtE])) - i++ - - case linkInlineFootnote, linkDeferredFootnote: - linkNode = NewNode(Link) - linkNode.Destination = link - linkNode.Title = title - linkNode.NoteID = noteID - linkNode.Footnote = footnoteNode - if t == linkInlineFootnote { - i++ - } - - default: - return 0, nil - } - - return i, linkNode -} - -func (p *Markdown) inlineHTMLComment(data []byte) int { - if len(data) < 5 { - return 0 - } - if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { - return 0 - } - i := 5 - // scan for an end-of-comment marker, across lines if necessary - for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { - i++ - } - // no end-of-comment marker - if i >= len(data) { - return 0 - } - return i + 1 -} - -func stripMailto(link []byte) []byte { - if bytes.HasPrefix(link, []byte("mailto://")) { - return link[9:] - } else if bytes.HasPrefix(link, []byte("mailto:")) { - return link[7:] - } else { - return link - } -} - -// autolinkType specifies a kind of autolink that gets detected. -type autolinkType int - -// These are the possible flag values for the autolink renderer. -const ( - notAutolink autolinkType = iota - normalAutolink - emailAutolink -) - -// '<' when tags or autolinks are allowed -func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - altype, end := tagLength(data) - if size := p.inlineHTMLComment(data); size > 0 { - end = size - } - if end > 2 { - if altype != notAutolink { - var uLink bytes.Buffer - unescapeText(&uLink, data[1:end+1-2]) - if uLink.Len() > 0 { - link := uLink.Bytes() - node := NewNode(Link) - node.Destination = link - if altype == emailAutolink { - node.Destination = append([]byte("mailto:"), link...) 
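The branch just above prepends mailto: to e-mail autolinks written in angle brackets, while stripMailto keeps the bare address as the visible text. A short sketch through Run, with invented addresses:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// <...> autolinks are handled by the '<' callback (leftAngle).
	// The e-mail form gets "mailto:" as the destination, while the
	// link text keeps just the address.
	input := []byte("Visit <https://example.com> or write to <someone@example.com>.")
	fmt.Println(string(blackfriday.Run(input)))
}
```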
- } - node.AppendChild(text(stripMailto(link))) - return end, node - } - } else { - htmlTag := NewNode(HTMLSpan) - htmlTag.Literal = data[:end] - return end, htmlTag - } - } - - return end, nil -} - -// '\\' backslash escape -var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") - -func escape(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - if len(data) > 1 { - if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { - return 2, NewNode(Hardbreak) - } - if bytes.IndexByte(escapeChars, data[1]) < 0 { - return 0, nil - } - - return 2, text(data[1:2]) - } - - return 2, nil -} - -func unescapeText(ob *bytes.Buffer, src []byte) { - i := 0 - for i < len(src) { - org := i - for i < len(src) && src[i] != '\\' { - i++ - } - - if i > org { - ob.Write(src[org:i]) - } - - if i+1 >= len(src) { - break - } - - ob.WriteByte(src[i+1]) - i += 2 - } -} - -// '&' escaped when it doesn't belong to an entity -// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; -func entity(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - end := 1 - - if end < len(data) && data[end] == '#' { - end++ - } - - for end < len(data) && isalnum(data[end]) { - end++ - } - - if end < len(data) && data[end] == ';' { - end++ // real entity - } else { - return 0, nil // lone '&' - } - - ent := data[:end] - // undo & escaping or it will be converted to &amp; by another - // escaper in the renderer - if bytes.Equal(ent, []byte("&")) { - ent = []byte{'&'} - } - - return end, text(ent) -} - -func linkEndsWithEntity(data []byte, linkEnd int) bool { - entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) - return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd -} - -// hasPrefixCaseInsensitive is a custom implementation of -// strings.HasPrefix(strings.ToLower(s), prefix) -// we rolled our own because ToLower pulls in a huge machinery of lowercasing -// anything from Unicode and that's very slow. Since this func will only be -// used on ASCII protocol prefixes, we can take shortcuts. 
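The shortcut described in the comment above, matching lower-case ASCII protocol prefixes without pulling in strings.ToLower, is a standalone pattern worth seeing in isolation. A self-contained reimplementation with a usage check (not part of the library's exported API):

```go
package main

import "fmt"

// asciiHasPrefixFold reports whether s starts with prefix, ignoring
// ASCII case only. prefix must already be lower-case, mirroring the
// protocol tables used above.
func asciiHasPrefixFold(s, prefix []byte) bool {
	if len(s) < len(prefix) {
		return false
	}
	const delta = 'a' - 'A'
	for i, b := range prefix {
		// Accept the exact byte or its upper-case ASCII form.
		if b != s[i] && b != s[i]+delta {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(asciiHasPrefixFold([]byte("HTTPS://example.com"), []byte("https://"))) // true
	fmt.Println(asciiHasPrefixFold([]byte("gopher://x"), []byte("https://")))          // false
}
```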
-func hasPrefixCaseInsensitive(s, prefix []byte) bool { - if len(s) < len(prefix) { - return false - } - delta := byte('a' - 'A') - for i, b := range prefix { - if b != s[i] && b != s[i]+delta { - return false - } - } - return true -} - -var protocolPrefixes = [][]byte{ - []byte("http://"), - []byte("https://"), - []byte("ftp://"), - []byte("file://"), - []byte("mailto:"), -} - -const shortestPrefix = 6 // len("ftp://"), the shortest of the above - -func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { - // quick check to rule out most false hits - if p.insideLink || len(data) < offset+shortestPrefix { - return 0, nil - } - for _, prefix := range protocolPrefixes { - endOfHead := offset + 8 // 8 is the len() of the longest prefix - if endOfHead > len(data) { - endOfHead = len(data) - } - if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { - return autoLink(p, data, offset) - } - } - return 0, nil -} - -func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { - // Now a more expensive check to see if we're not inside an anchor element - anchorStart := offset - offsetFromAnchor := 0 - for anchorStart > 0 && data[anchorStart] != '<' { - anchorStart-- - offsetFromAnchor++ - } - - anchorStr := anchorRe.Find(data[anchorStart:]) - if anchorStr != nil { - anchorClose := NewNode(HTMLSpan) - anchorClose.Literal = anchorStr[offsetFromAnchor:] - return len(anchorStr) - offsetFromAnchor, anchorClose - } - - // scan backward for a word boundary - rewind := 0 - for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { - rewind++ - } - if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters - return 0, nil - } - - origData := data - data = data[offset-rewind:] - - if !isSafeLink(data) { - return 0, nil - } - - linkEnd := 0 - for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { - linkEnd++ - } - - // Skip punctuation at the end of the link - if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { - linkEnd-- - } - - // But don't skip semicolon if it's a part of escaped entity: - if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { - linkEnd-- - } - - // See if the link finishes with a punctuation sign that can be closed. - var copen byte - switch data[linkEnd-1] { - case '"': - copen = '"' - case '\'': - copen = '\'' - case ')': - copen = '(' - case ']': - copen = '[' - case '}': - copen = '{' - default: - copen = 0 - } - - if copen != 0 { - bufEnd := offset - rewind + linkEnd - 2 - - openDelim := 1 - - /* Try to close the final punctuation sign in this same line; - * if we managed to close it outside of the URL, that means that it's - * not part of the URL. If it closes inside the URL, that means it - * is part of the URL. 
- * - * Examples: - * - * foo http://www.pokemon.com/Pikachu_(Electric) bar - * => http://www.pokemon.com/Pikachu_(Electric) - * - * foo (http://www.pokemon.com/Pikachu_(Electric)) bar - * => http://www.pokemon.com/Pikachu_(Electric) - * - * foo http://www.pokemon.com/Pikachu_(Electric)) bar - * => http://www.pokemon.com/Pikachu_(Electric)) - * - * (foo http://www.pokemon.com/Pikachu_(Electric)) bar - * => foo http://www.pokemon.com/Pikachu_(Electric) - */ - - for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { - if origData[bufEnd] == data[linkEnd-1] { - openDelim++ - } - - if origData[bufEnd] == copen { - openDelim-- - } - - bufEnd-- - } - - if openDelim == 0 { - linkEnd-- - } - } - - var uLink bytes.Buffer - unescapeText(&uLink, data[:linkEnd]) - - if uLink.Len() > 0 { - node := NewNode(Link) - node.Destination = uLink.Bytes() - node.AppendChild(text(uLink.Bytes())) - return linkEnd, node - } - - return linkEnd, nil -} - -func isEndOfLink(char byte) bool { - return isspace(char) || char == '<' -} - -var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} -var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} - -func isSafeLink(link []byte) bool { - for _, path := range validPaths { - if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { - if len(link) == len(path) { - return true - } else if isalnum(link[len(path)]) { - return true - } - } - } - - for _, prefix := range validUris { - // TODO: handle unicode here - // case-insensitive prefix test - if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { - return true - } - } - - return false -} - -// return the length of the given tag, or 0 is it's not valid -func tagLength(data []byte) (autolink autolinkType, end int) { - var i, j int - - // a valid tag can't be shorter than 3 chars - if len(data) < 3 { - return notAutolink, 0 - } - - // begins with a '<' optionally followed by '/', followed by letter or number - if data[0] != '<' { - return notAutolink, 0 - } - if data[1] == '/' { - i = 2 - } else { - i = 1 - } - - if !isalnum(data[i]) { - return notAutolink, 0 - } - - // scheme test - autolink = notAutolink - - // try to find the beginning of an URI - for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { - i++ - } - - if i > 1 && i < len(data) && data[i] == '@' { - if j = isMailtoAutoLink(data[i:]); j != 0 { - return emailAutolink, i + j - } - } - - if i > 2 && i < len(data) && data[i] == ':' { - autolink = normalAutolink - i++ - } - - // complete autolink test: no whitespace or ' or " - switch { - case i >= len(data): - autolink = notAutolink - case autolink != notAutolink: - j = i - - for i < len(data) { - if data[i] == '\\' { - i += 2 - } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { - break - } else { - i++ - } - - } - - if i >= len(data) { - return autolink, 0 - } - if i > j && data[i] == '>' { - return autolink, i + 1 - } - - // one of the forbidden chars has been found - autolink = notAutolink - } - i += bytes.IndexByte(data[i:], '>') - if i < 0 { - return autolink, 0 - } - return autolink, i + 1 -} - -// look for the address part of a mail autolink and '>' -// this is less strict than the original markdown e-mail address matching -func isMailtoAutoLink(data []byte) int { - nb := 0 - - // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' - for i := 0; i < len(data); i++ { - if isalnum(data[i]) { - continue - } - - switch data[i] { - case '@': - nb++ - - case '-', '.', '_': - break - - case '>': - if nb == 1 { - return i + 1 - } - return 0 - default: - return 0 - } - } - - return 0 -} - -// look for the next emph char, skipping other constructs -func helperFindEmphChar(data []byte, c byte) int { - i := 0 - - for i < len(data) { - for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { - i++ - } - if i >= len(data) { - return 0 - } - // do not count escaped chars - if i != 0 && data[i-1] == '\\' { - i++ - continue - } - if data[i] == c { - return i - } - - if data[i] == '`' { - // skip a code span - tmpI := 0 - i++ - for i < len(data) && data[i] != '`' { - if tmpI == 0 && data[i] == c { - tmpI = i - } - i++ - } - if i >= len(data) { - return tmpI - } - i++ - } else if data[i] == '[' { - // skip a link - tmpI := 0 - i++ - for i < len(data) && data[i] != ']' { - if tmpI == 0 && data[i] == c { - tmpI = i - } - i++ - } - i++ - for i < len(data) && (data[i] == ' ' || data[i] == '\n') { - i++ - } - if i >= len(data) { - return tmpI - } - if data[i] != '[' && data[i] != '(' { // not a link - if tmpI > 0 { - return tmpI - } - continue - } - cc := data[i] - i++ - for i < len(data) && data[i] != cc { - if tmpI == 0 && data[i] == c { - return i - } - i++ - } - if i >= len(data) { - return tmpI - } - i++ - } - } - return 0 -} - -func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { - i := 0 - - // skip one symbol if coming from emph3 - if len(data) > 1 && data[0] == c && data[1] == c { - i = 1 - } - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - if i >= len(data) { - return 0, nil - } - - if i+1 < len(data) && data[i+1] == c { - i++ - continue - } - - if data[i] == c && !isspace(data[i-1]) { - - if p.extensions&NoIntraEmphasis != 0 { - if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { - continue - } - } - - emph := NewNode(Emph) - p.inline(emph, data[:i]) - return i + 1, emph - } - } - - return 0, nil -} - -func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { - i := 0 - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - - if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && 
!isspace(data[i-1]) { - nodeType := Strong - if c == '~' { - nodeType = Del - } - node := NewNode(nodeType) - p.inline(node, data[:i]) - return i + 2, node - } - i++ - } - return 0, nil -} - -func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { - i := 0 - origData := data - data = data[offset:] - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - - // skip whitespace preceded symbols - if data[i] != c || isspace(data[i-1]) { - continue - } - - switch { - case i+2 < len(data) && data[i+1] == c && data[i+2] == c: - // triple symbol found - strong := NewNode(Strong) - em := NewNode(Emph) - strong.AppendChild(em) - p.inline(em, data[:i]) - return i + 3, strong - case (i+1 < len(data) && data[i+1] == c): - // double symbol found, hand over to emph1 - length, node := helperEmphasis(p, origData[offset-2:], c) - if length == 0 { - return 0, nil - } - return length - 2, node - default: - // single symbol found, hand over to emph2 - length, node := helperDoubleEmphasis(p, origData[offset-1:], c) - if length == 0 { - return 0, nil - } - return length - 1, node - } - } - return 0, nil -} - -func text(s []byte) *Node { - node := NewNode(Text) - node.Literal = s - return node -} - -func normalizeURI(s []byte) []byte { - return s // TODO: implement -} diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go deleted file mode 100644 index 58d2e4538c..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/markdown.go +++ /dev/null @@ -1,950 +0,0 @@ -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. - -package blackfriday - -import ( - "bytes" - "fmt" - "io" - "strings" - "unicode/utf8" -) - -// -// Markdown parsing and processing -// - -// Version string of the package. Appears in the rendered document when -// CompletePage flag is on. -const Version = "2.0" - -// Extensions is a bitwise or'ed collection of enabled Blackfriday's -// extensions. -type Extensions int - -// These are the supported markdown parsing extensions. -// OR these values together to select multiple extensions. 
-const ( - NoExtensions Extensions = 0 - NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words - Tables // Render tables - FencedCode // Render fenced code blocks - Autolink // Detect embedded URLs that are not explicitly marked - Strikethrough // Strikethrough text using ~~test~~ - LaxHTMLBlocks // Loosen up HTML block parsing rules - SpaceHeadings // Be strict about prefix heading rules - HardLineBreak // Translate newlines into line breaks - TabSizeEight // Expand tabs to eight spaces instead of four - Footnotes // Pandoc-style footnotes - NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block - HeadingIDs // specify heading IDs with {#id} - Titleblock // Titleblock ala pandoc - AutoHeadingIDs // Create the heading ID from the text - BackslashLineBreak // Translate trailing backslashes into line breaks - DefinitionLists // Render definition lists - - CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | - SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes - - CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | - Autolink | Strikethrough | SpaceHeadings | HeadingIDs | - BackslashLineBreak | DefinitionLists -) - -// ListType contains bitwise or'ed flags for list and list item objects. -type ListType int - -// These are the possible flag values for the ListItem renderer. -// Multiple flag values may be ORed together. -// These are mostly of interest if you are writing a new output format. -const ( - ListTypeOrdered ListType = 1 << iota - ListTypeDefinition - ListTypeTerm - - ListItemContainsBlock - ListItemBeginningOfList // TODO: figure out if this is of any use now - ListItemEndOfList -) - -// CellAlignFlags holds a type of alignment in a table cell. -type CellAlignFlags int - -// These are the possible flag values for the table cell renderer. -// Only a single one of these values will be used; they are not ORed together. -// These are mostly of interest if you are writing a new output format. -const ( - TableAlignmentLeft CellAlignFlags = 1 << iota - TableAlignmentRight - TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) -) - -// The size of a tab stop. -const ( - TabSizeDefault = 4 - TabSizeDouble = 8 -) - -// blockTags is a set of tags that are recognized as HTML block tags. -// Any of these can be included in markdown text without special escaping. -var blockTags = map[string]struct{}{ - "blockquote": {}, - "del": {}, - "div": {}, - "dl": {}, - "fieldset": {}, - "form": {}, - "h1": {}, - "h2": {}, - "h3": {}, - "h4": {}, - "h5": {}, - "h6": {}, - "iframe": {}, - "ins": {}, - "math": {}, - "noscript": {}, - "ol": {}, - "pre": {}, - "p": {}, - "script": {}, - "style": {}, - "table": {}, - "ul": {}, - - // HTML5 - "address": {}, - "article": {}, - "aside": {}, - "canvas": {}, - "figcaption": {}, - "figure": {}, - "footer": {}, - "header": {}, - "hgroup": {}, - "main": {}, - "nav": {}, - "output": {}, - "progress": {}, - "section": {}, - "video": {}, -} - -// Renderer is the rendering interface. This is mostly of interest if you are -// implementing a new rendering format. -// -// Only an HTML implementation is provided in this repository, see the README -// for external implementations. -type Renderer interface { - // RenderNode is the main rendering method. It will be called once for - // every leaf node and twice for every non-leaf node (first with - // entering=true, then with entering=false). 
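Since RenderNode is called twice for container nodes, a custom Renderer mostly switches on entering. A toy plain-text renderer satisfying the interface documented above; the type and its behavior are invented for illustration:

```go
package main

import (
	"fmt"
	"io"

	"github.com/russross/blackfriday/v2"
)

// textRenderer is a toy Renderer that emits only text content.
type textRenderer struct{}

func (textRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
	switch node.Type {
	case blackfriday.Text, blackfriday.Code:
		w.Write(node.Literal)
	case blackfriday.Paragraph, blackfriday.Heading:
		if !entering {
			io.WriteString(w, "\n") // leaving a block: end the line
		}
	}
	return blackfriday.GoToNext
}

// No document preamble or footer for plain text.
func (textRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {}
func (textRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {}

func main() {
	out := blackfriday.Run([]byte("# Title\n\nSome *emphasized* text."),
		blackfriday.WithRenderer(textRenderer{}))
	fmt.Println(string(out))
}
```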
The method should write its - // rendition of the node to the supplied writer w. - RenderNode(w io.Writer, node *Node, entering bool) WalkStatus - - // RenderHeader is a method that allows the renderer to produce some - // content preceding the main body of the output document. The header is - // understood in the broad sense here. For example, the default HTML - // renderer will write not only the HTML document preamble, but also the - // table of contents if it was requested. - // - // The method will be passed an entire document tree, in case a particular - // implementation needs to inspect it to produce output. - // - // The output should be written to the supplied writer w. If your - // implementation has no header to write, supply an empty implementation. - RenderHeader(w io.Writer, ast *Node) - - // RenderFooter is a symmetric counterpart of RenderHeader. - RenderFooter(w io.Writer, ast *Node) -} - -// Callback functions for inline parsing. One such function is defined -// for each character that triggers a response when parsing inline data. -type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) - -// Markdown is a type that holds extensions and the runtime state used by -// Parse, and the renderer. You can not use it directly, construct it with New. -type Markdown struct { - renderer Renderer - referenceOverride ReferenceOverrideFunc - refs map[string]*reference - inlineCallback [256]inlineParser - extensions Extensions - nesting int - maxNesting int - insideLink bool - - // Footnotes need to be ordered as well as available to quickly check for - // presence. If a ref is also a footnote, it's stored both in refs and here - // in notes. Slice is nil if footnotes not enabled. - notes []*reference - - doc *Node - tip *Node // = doc - oldTip *Node - lastMatchedContainer *Node // = doc - allClosed bool -} - -func (p *Markdown) getRef(refid string) (ref *reference, found bool) { - if p.referenceOverride != nil { - r, overridden := p.referenceOverride(refid) - if overridden { - if r == nil { - return nil, false - } - return &reference{ - link: []byte(r.Link), - title: []byte(r.Title), - noteID: 0, - hasBlock: false, - text: []byte(r.Text)}, true - } - } - // refs are case insensitive - ref, found = p.refs[strings.ToLower(refid)] - return ref, found -} - -func (p *Markdown) finalize(block *Node) { - above := block.Parent - block.open = false - p.tip = above -} - -func (p *Markdown) addChild(node NodeType, offset uint32) *Node { - return p.addExistingChild(NewNode(node), offset) -} - -func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { - for !p.tip.canContain(node.Type) { - p.finalize(p.tip) - } - p.tip.AppendChild(node) - p.tip = node - return node -} - -func (p *Markdown) closeUnmatchedBlocks() { - if !p.allClosed { - for p.oldTip != p.lastMatchedContainer { - parent := p.oldTip.Parent - p.finalize(p.oldTip) - p.oldTip = parent - } - p.allClosed = true - } -} - -// -// -// Public interface -// -// - -// Reference represents the details of a link. -// See the documentation in Options for more details on use-case. -type Reference struct { - // Link is usually the URL the reference points to. - Link string - // Title is the alternate text describing the link in more detail. 
- Title string - // Text is the optional text to override the ref with if the syntax used was - // [refid][] - Text string -} - -// ReferenceOverrideFunc is expected to be called with a reference string and -// return either a valid Reference type that the reference string maps to or -// nil. If overridden is false, the default reference logic will be executed. -// See the documentation in Options for more details on use-case. -type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) - -// New constructs a Markdown processor. You can use the same With* functions as -// for Run() to customize parser's behavior and the renderer. -func New(opts ...Option) *Markdown { - var p Markdown - for _, opt := range opts { - opt(&p) - } - p.refs = make(map[string]*reference) - p.maxNesting = 16 - p.insideLink = false - docNode := NewNode(Document) - p.doc = docNode - p.tip = docNode - p.oldTip = docNode - p.lastMatchedContainer = docNode - p.allClosed = true - // register inline parsers - p.inlineCallback[' '] = maybeLineBreak - p.inlineCallback['*'] = emphasis - p.inlineCallback['_'] = emphasis - if p.extensions&Strikethrough != 0 { - p.inlineCallback['~'] = emphasis - } - p.inlineCallback['`'] = codeSpan - p.inlineCallback['\n'] = lineBreak - p.inlineCallback['['] = link - p.inlineCallback['<'] = leftAngle - p.inlineCallback['\\'] = escape - p.inlineCallback['&'] = entity - p.inlineCallback['!'] = maybeImage - p.inlineCallback['^'] = maybeInlineFootnote - if p.extensions&Autolink != 0 { - p.inlineCallback['h'] = maybeAutoLink - p.inlineCallback['m'] = maybeAutoLink - p.inlineCallback['f'] = maybeAutoLink - p.inlineCallback['H'] = maybeAutoLink - p.inlineCallback['M'] = maybeAutoLink - p.inlineCallback['F'] = maybeAutoLink - } - if p.extensions&Footnotes != 0 { - p.notes = make([]*reference, 0) - } - return &p -} - -// Option customizes the Markdown processor's default behavior. -type Option func(*Markdown) - -// WithRenderer allows you to override the default renderer. -func WithRenderer(r Renderer) Option { - return func(p *Markdown) { - p.renderer = r - } -} - -// WithExtensions allows you to pick some of the many extensions provided by -// Blackfriday. You can bitwise OR them. -func WithExtensions(e Extensions) Option { - return func(p *Markdown) { - p.extensions = e - } -} - -// WithNoExtensions turns off all extensions and custom behavior. -func WithNoExtensions() Option { - return func(p *Markdown) { - p.extensions = NoExtensions - p.renderer = NewHTMLRenderer(HTMLRendererParameters{ - Flags: HTMLFlagsNone, - }) - } -} - -// WithRefOverride sets an optional function callback that is called every -// time a reference is resolved. -// -// In Markdown, the link reference syntax can be made to resolve a link to -// a reference instead of an inline URL, in one of the following ways: -// -// * [link text][refid] -// * [refid][] -// -// Usually, the refid is defined at the bottom of the Markdown document. If -// this override function is provided, the refid is passed to the override -// function first, before consulting the defined refids at the bottom. If -// the override function indicates an override did not occur, the refids at -// the bottom will be used to fill in the link details. -func WithRefOverride(o ReferenceOverrideFunc) Option { - return func(p *Markdown) { - p.referenceOverride = o - } -} - -// Run is the main entry point to Blackfriday. It parses and renders a -// block of markdown-encoded text. 
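To make WithRefOverride concrete, here is a sketch that resolves a made-up refid in code instead of requiring a definition at the bottom of the document; the id and URL are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Resolve [docs][] without a "[docs]: ..." line in the document.
	override := func(refid string) (*blackfriday.Reference, bool) {
		if refid == "docs" { // hypothetical reference id
			return &blackfriday.Reference{
				Link:  "https://example.com/docs",
				Title: "Project docs",
				Text:  "the documentation",
			}, true
		}
		return nil, false // fall back to refids defined in the document
	}
	out := blackfriday.Run([]byte("Read [docs][] first."),
		blackfriday.WithRefOverride(override))
	fmt.Println(string(out))
}
```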
-// -// The simplest invocation of Run takes one argument, input: -// output := Run(input) -// This will parse the input with CommonExtensions enabled and render it with -// the default HTMLRenderer (with CommonHTMLFlags). -// -// Variadic arguments opts can customize the default behavior. Since Markdown -// type does not contain exported fields, you can not use it directly. Instead, -// use the With* functions. For example, this will call the most basic -// functionality, with no extensions: -// output := Run(input, WithNoExtensions()) -// -// You can use any number of With* arguments, even contradicting ones. They -// will be applied in order of appearance and the latter will override the -// former: -// output := Run(input, WithNoExtensions(), WithExtensions(exts), -// WithRenderer(yourRenderer)) -func Run(input []byte, opts ...Option) []byte { - r := NewHTMLRenderer(HTMLRendererParameters{ - Flags: CommonHTMLFlags, - }) - optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} - optList = append(optList, opts...) - parser := New(optList...) - ast := parser.Parse(input) - var buf bytes.Buffer - parser.renderer.RenderHeader(&buf, ast) - ast.Walk(func(node *Node, entering bool) WalkStatus { - return parser.renderer.RenderNode(&buf, node, entering) - }) - parser.renderer.RenderFooter(&buf, ast) - return buf.Bytes() -} - -// Parse is an entry point to the parsing part of Blackfriday. It takes an -// input markdown document and produces a syntax tree for its contents. This -// tree can then be rendered with a default or custom renderer, or -// analyzed/transformed by the caller to whatever non-standard needs they have. -// The return value is the root node of the syntax tree. -func (p *Markdown) Parse(input []byte) *Node { - p.block(input) - // Walk the tree and finish up some of unfinished blocks - for p.tip != nil { - p.finalize(p.tip) - } - // Walk the tree again and process inline markdown in each block - p.doc.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { - p.inline(node, node.content) - node.content = nil - } - return GoToNext - }) - p.parseRefsToAST() - return p.doc -} - -func (p *Markdown) parseRefsToAST() { - if p.extensions&Footnotes == 0 || len(p.notes) == 0 { - return - } - p.tip = p.doc - block := p.addBlock(List, nil) - block.IsFootnotesList = true - block.ListFlags = ListTypeOrdered - flags := ListItemBeginningOfList - // Note: this loop is intentionally explicit, not range-form. This is - // because the body of the loop will append nested footnotes to p.notes and - // we need to process those late additions. Range form would only walk over - // the fixed initial set. - for i := 0; i < len(p.notes); i++ { - ref := p.notes[i] - p.addExistingChild(ref.footnote, 0) - block := ref.footnote - block.ListFlags = flags | ListTypeOrdered - block.RefLink = ref.link - if ref.hasBlock { - flags |= ListItemContainsBlock - p.block(ref.title) - } else { - p.inline(block, ref.title) - } - flags &^= ListItemBeginningOfList | ListItemContainsBlock - } - above := block.Parent - finalizeList(block) - p.tip = above - block.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Paragraph || node.Type == Heading { - p.inline(node, node.content) - node.content = nil - } - return GoToNext - }) -} - -// -// Link references -// -// This section implements support for references that (usually) appear -// as footnotes in a document, and can be referenced anywhere in the document. 
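Parse is useful on its own when you want the syntax tree rather than HTML. A sketch that collects link destinations from a document, under the Walk semantics described above; the input is invented:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	ast := md.Parse([]byte("[a](https://example.com/a) and [b](https://example.com/b)"))

	// Walk visits containers twice; collect only on the entering pass.
	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering && node.Type == blackfriday.Link {
			fmt.Printf("%s\n", node.LinkData.Destination)
		}
		return blackfriday.GoToNext
	})
}
```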
-// The basic format is: -// -// [1]: http://www.google.com/ "Google" -// [2]: http://www.github.com/ "Github" -// -// Anywhere in the document, the reference can be linked by referring to its -// label, i.e., 1 and 2 in this example, as in: -// -// This library is hosted on [Github][2], a git hosting site. -// -// Actual footnotes as specified in Pandoc and supported by some other Markdown -// libraries such as php-markdown are also taken care of. They look like this: -// -// This sentence needs a bit of further explanation.[^note] -// -// [^note]: This is the explanation. -// -// Footnotes should be placed at the end of the document in an ordered list. -// Finally, there are inline footnotes such as: -// -// Inline footnotes^[Also supported.] provide a quick inline explanation, -// but are rendered at the bottom of the document. -// - -// reference holds all information necessary for a reference-style links or -// footnotes. -// -// Consider this markdown with reference-style links: -// -// [link][ref] -// -// [ref]: /url/ "tooltip title" -// -// It will be ultimately converted to this HTML: -// -//
<p><a href="/url/" title="tooltip title">link</a></p>
    -// -// And a reference structure will be populated as follows: -// -// p.refs["ref"] = &reference{ -// link: "/url/", -// title: "tooltip title", -// } -// -// Alternatively, reference can contain information about a footnote. Consider -// this markdown: -// -// Text needing a footnote.[^a] -// -// [^a]: This is the note -// -// A reference structure will be populated as follows: -// -// p.refs["a"] = &reference{ -// link: "a", -// title: "This is the note", -// noteID: , -// } -// -// TODO: As you can see, it begs for splitting into two dedicated structures -// for refs and for footnotes. -type reference struct { - link []byte - title []byte - noteID int // 0 if not a footnote ref - hasBlock bool - footnote *Node // a link to the Item node within a list of footnotes - - text []byte // only gets populated by refOverride feature with Reference.Text -} - -func (r *reference) String() string { - return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", - r.link, r.title, r.text, r.noteID, r.hasBlock) -} - -// Check whether or not data starts with a reference link. -// If so, it is parsed and stored in the list of references -// (in the render struct). -// Returns the number of bytes to skip to move past it, -// or zero if the first line is not a reference. -func isReference(p *Markdown, data []byte, tabSize int) int { - // up to 3 optional leading spaces - if len(data) < 4 { - return 0 - } - i := 0 - for i < 3 && data[i] == ' ' { - i++ - } - - noteID := 0 - - // id part: anything but a newline between brackets - if data[i] != '[' { - return 0 - } - i++ - if p.extensions&Footnotes != 0 { - if i < len(data) && data[i] == '^' { - // we can set it to anything here because the proper noteIds will - // be assigned later during the second pass. It just has to be != 0 - noteID = 1 - i++ - } - } - idOffset := i - for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { - i++ - } - if i >= len(data) || data[i] != ']' { - return 0 - } - idEnd := i - // footnotes can have empty ID, like this: [^], but a reference can not be - // empty like this: []. Break early if it's not a footnote and there's no ID - if noteID == 0 && idOffset == idEnd { - return 0 - } - // spacer: colon (space | tab)* newline? 
(space | tab)* - i++ - if i >= len(data) || data[i] != ':' { - return 0 - } - i++ - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i < len(data) && (data[i] == '\n' || data[i] == '\r') { - i++ - if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { - i++ - } - } - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i >= len(data) { - return 0 - } - - var ( - linkOffset, linkEnd int - titleOffset, titleEnd int - lineEnd int - raw []byte - hasBlock bool - ) - - if p.extensions&Footnotes != 0 && noteID != 0 { - linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) - lineEnd = linkEnd - } else { - linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) - } - if lineEnd == 0 { - return 0 - } - - // a valid ref has been found - - ref := &reference{ - noteID: noteID, - hasBlock: hasBlock, - } - - if noteID > 0 { - // reusing the link field for the id since footnotes don't have links - ref.link = data[idOffset:idEnd] - // if footnote, it's not really a title, it's the contained text - ref.title = raw - } else { - ref.link = data[linkOffset:linkEnd] - ref.title = data[titleOffset:titleEnd] - } - - // id matches are case-insensitive - id := string(bytes.ToLower(data[idOffset:idEnd])) - - p.refs[id] = ref - - return lineEnd -} - -func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { - // link: whitespace-free sequence, optionally between angle brackets - if data[i] == '<' { - i++ - } - linkOffset = i - for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { - i++ - } - linkEnd = i - if data[linkOffset] == '<' && data[linkEnd-1] == '>' { - linkOffset++ - linkEnd-- - } - - // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { - return - } - - // compute end-of-line - if i >= len(data) || data[i] == '\r' || data[i] == '\n' { - lineEnd = i - } - if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { - lineEnd++ - } - - // optional (space|tab)* spacer after a newline - if lineEnd > 0 { - i = lineEnd + 1 - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - } - - // optional title: any non-newline sequence enclosed in '"() alone on its line - if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { - i++ - titleOffset = i - - // look for EOL - for i < len(data) && data[i] != '\n' && data[i] != '\r' { - i++ - } - if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { - titleEnd = i + 1 - } else { - titleEnd = i - } - - // step back - i-- - for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { - i-- - } - if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { - lineEnd = titleEnd - titleEnd = i - } - } - - return -} - -// The first bit of this logic is the same as Parser.listItem, but the rest -// is much simpler. This function simply finds the entire block and shifts it -// over by one tab if it is indeed a block (just returns the line if it's not). -// blockEnd is the end of the section in the input buffer, and contents is the -// extracted text that was shifted over one tab. It will need to be rendered at -// the end of the document. 
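Putting the reference-link and footnote formats documented above together, a short end-to-end sketch with the Footnotes extension enabled; the text is invented:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte(`This library is hosted on [Github][2].[^note]

[2]: https://www.github.com/ "Github"

[^note]: Footnotes render as an ordered list at the end of the document.
`)
	// Footnotes is not part of CommonExtensions, so OR it in explicitly.
	out := blackfriday.Run(input,
		blackfriday.WithExtensions(blackfriday.CommonExtensions|blackfriday.Footnotes))
	fmt.Println(string(out))
}
```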
-func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { - if i == 0 || len(data) == 0 { - return - } - - // skip leading whitespace on first line - for i < len(data) && data[i] == ' ' { - i++ - } - - blockStart = i - - // find the end of the line - blockEnd = i - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[blockEnd:i]) - blockEnd = i - - // process the following lines - containsBlankLine := false - -gatherLines: - for blockEnd < len(data) { - i++ - - // find the end of this line - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[blockEnd:i]) > 0 { - containsBlankLine = true - blockEnd = i - continue - } - - n := 0 - if n = isIndented(data[blockEnd:i], indentSize); n == 0 { - // this is the end of the block. - // we don't want to include this last line in the index. - break gatherLines - } - - // if there were blank lines before this one, insert a new one now - if containsBlankLine { - raw.WriteByte('\n') - containsBlankLine = false - } - - // get rid of that first tab, write to buffer - raw.Write(data[blockEnd+n : i]) - hasBlock = true - - blockEnd = i - } - - if data[blockEnd-1] != '\n' { - raw.WriteByte('\n') - } - - contents = raw.Bytes() - - return -} - -// -// -// Miscellaneous helper functions -// -// - -// Test if a character is a punctuation symbol. -// Taken from a private function in regexp in the stdlib. -func ispunct(c byte) bool { - for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { - if c == r { - return true - } - } - return false -} - -// Test if a character is a whitespace character. -func isspace(c byte) bool { - return ishorizontalspace(c) || isverticalspace(c) -} - -// Test if a character is a horizontal whitespace character. -func ishorizontalspace(c byte) bool { - return c == ' ' || c == '\t' -} - -// Test if a character is a vertical character. -func isverticalspace(c byte) bool { - return c == '\n' || c == '\r' || c == '\f' || c == '\v' -} - -// Test if a character is letter. -func isletter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// Test if a character is a letter or a digit. -// TODO: check when this is looking for ASCII alnum and when it should use unicode -func isalnum(c byte) bool { - return (c >= '0' && c <= '9') || isletter(c) -} - -// Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
-// always ends output with a newline -func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { - // first, check for common cases: no tabs, or only tabs at beginning of line - i, prefix := 0, 0 - slowcase := false - for i = 0; i < len(line); i++ { - if line[i] == '\t' { - if prefix == i { - prefix++ - } else { - slowcase = true - break - } - } - } - - // no need to decode runes if all tabs are at the beginning of the line - if !slowcase { - for i = 0; i < prefix*tabSize; i++ { - out.WriteByte(' ') - } - out.Write(line[prefix:]) - return - } - - // the slow case: we need to count runes to figure out how - // many spaces to insert for each tab - column := 0 - i = 0 - for i < len(line) { - start := i - for i < len(line) && line[i] != '\t' { - _, size := utf8.DecodeRune(line[i:]) - i += size - column++ - } - - if i > start { - out.Write(line[start:i]) - } - - if i >= len(line) { - break - } - - for { - out.WriteByte(' ') - column++ - if column%tabSize == 0 { - break - } - } - - i++ - } -} - -// Find if a line counts as indented or not. -// Returns number of characters the indent is (0 = not indented). -func isIndented(data []byte, indentSize int) int { - if len(data) == 0 { - return 0 - } - if data[0] == '\t' { - return 1 - } - if len(data) < indentSize { - return 0 - } - for i := 0; i < indentSize; i++ { - if data[i] != ' ' { - return 0 - } - } - return indentSize -} - -// Create a url-safe slug for fragments -func slugify(in []byte) []byte { - if len(in) == 0 { - return in - } - out := make([]byte, 0, len(in)) - sym := false - - for _, ch := range in { - if isalnum(ch) { - sym = false - out = append(out, ch) - } else if sym { - continue - } else { - out = append(out, '-') - sym = true - } - } - var a, b int - var ch byte - for a, ch = range out { - if ch != '-' { - break - } - } - for b = len(out) - 1; b > 0; b-- { - if out[b] != '-' { - break - } - } - return out[a : b+1] -} diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go deleted file mode 100644 index 04e6050cee..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/node.go +++ /dev/null @@ -1,360 +0,0 @@ -package blackfriday - -import ( - "bytes" - "fmt" -) - -// NodeType specifies a type of a single node of a syntax tree. Usually one -// node (and its type) corresponds to a single markdown feature, e.g. emphasis -// or code block. -type NodeType int - -// Constants for identifying different types of nodes. See NodeType. -const ( - Document NodeType = iota - BlockQuote - List - Item - Paragraph - Heading - HorizontalRule - Emph - Strong - Del - Link - Image - Text - HTMLBlock - CodeBlock - Softbreak - Hardbreak - Code - HTMLSpan - Table - TableCell - TableHead - TableBody - TableRow -) - -var nodeTypeNames = []string{ - Document: "Document", - BlockQuote: "BlockQuote", - List: "List", - Item: "Item", - Paragraph: "Paragraph", - Heading: "Heading", - HorizontalRule: "HorizontalRule", - Emph: "Emph", - Strong: "Strong", - Del: "Del", - Link: "Link", - Image: "Image", - Text: "Text", - HTMLBlock: "HTMLBlock", - CodeBlock: "CodeBlock", - Softbreak: "Softbreak", - Hardbreak: "Hardbreak", - Code: "Code", - HTMLSpan: "HTMLSpan", - Table: "Table", - TableCell: "TableCell", - TableHead: "TableHead", - TableBody: "TableBody", - TableRow: "TableRow", -} - -func (t NodeType) String() string { - return nodeTypeNames[t] -} - -// ListData contains fields relevant to a List and Item node type. 
-type ListData struct { - ListFlags ListType - Tight bool // Skip
<p>
    s around list item data if true - BulletChar byte // '*', '+' or '-' in bullet lists - Delimiter byte // '.' or ')' after the number in ordered lists - RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering - IsFootnotesList bool // This is a list of footnotes -} - -// LinkData contains fields relevant to a Link node type. -type LinkData struct { - Destination []byte // Destination is what goes into a href - Title []byte // Title is the tooltip thing that goes in a title attribute - NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote - Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. -} - -// CodeBlockData contains fields relevant to a CodeBlock node type. -type CodeBlockData struct { - IsFenced bool // Specifies whether it's a fenced code block or an indented one - Info []byte // This holds the info string - FenceChar byte - FenceLength int - FenceOffset int -} - -// TableCellData contains fields relevant to a TableCell node type. -type TableCellData struct { - IsHeader bool // This tells if it's under the header row - Align CellAlignFlags // This holds the value for align attribute -} - -// HeadingData contains fields relevant to a Heading node type. -type HeadingData struct { - Level int // This holds the heading level number - HeadingID string // This might hold heading ID, if present - IsTitleblock bool // Specifies whether it's a title block -} - -// Node is a single element in the abstract syntax tree of the parsed document. -// It holds connections to the structurally neighboring nodes and, for certain -// types of nodes, additional information that might be needed when rendering. -type Node struct { - Type NodeType // Determines the type of the node - Parent *Node // Points to the parent - FirstChild *Node // Points to the first child, if any - LastChild *Node // Points to the last child, if any - Prev *Node // Previous sibling; nil if it's the first child - Next *Node // Next sibling; nil if it's the last child - - Literal []byte // Text contents of the leaf nodes - - HeadingData // Populated if Type is Heading - ListData // Populated if Type is List - CodeBlockData // Populated if Type is CodeBlock - LinkData // Populated if Type is Link - TableCellData // Populated if Type is TableCell - - content []byte // Markdown content of the block nodes - open bool // Specifies an open block node that has not been finished to process yet -} - -// NewNode allocates a node of a specified type. -func NewNode(typ NodeType) *Node { - return &Node{ - Type: typ, - open: true, - } -} - -func (n *Node) String() string { - ellipsis := "" - snippet := n.Literal - if len(snippet) > 16 { - snippet = snippet[:16] - ellipsis = "..." - } - return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) -} - -// Unlink removes node 'n' from the tree. -// It panics if the node is nil. -func (n *Node) Unlink() { - if n.Prev != nil { - n.Prev.Next = n.Next - } else if n.Parent != nil { - n.Parent.FirstChild = n.Next - } - if n.Next != nil { - n.Next.Prev = n.Prev - } else if n.Parent != nil { - n.Parent.LastChild = n.Prev - } - n.Parent = nil - n.Next = nil - n.Prev = nil -} - -// AppendChild adds a node 'child' as a child of 'n'. -// It panics if either node is nil. 
-func (n *Node) AppendChild(child *Node) { - child.Unlink() - child.Parent = n - if n.LastChild != nil { - n.LastChild.Next = child - child.Prev = n.LastChild - n.LastChild = child - } else { - n.FirstChild = child - n.LastChild = child - } -} - -// InsertBefore inserts 'sibling' immediately before 'n'. -// It panics if either node is nil. -func (n *Node) InsertBefore(sibling *Node) { - sibling.Unlink() - sibling.Prev = n.Prev - if sibling.Prev != nil { - sibling.Prev.Next = sibling - } - sibling.Next = n - n.Prev = sibling - sibling.Parent = n.Parent - if sibling.Prev == nil { - sibling.Parent.FirstChild = sibling - } -} - -// IsContainer returns true if 'n' can contain children. -func (n *Node) IsContainer() bool { - switch n.Type { - case Document: - fallthrough - case BlockQuote: - fallthrough - case List: - fallthrough - case Item: - fallthrough - case Paragraph: - fallthrough - case Heading: - fallthrough - case Emph: - fallthrough - case Strong: - fallthrough - case Del: - fallthrough - case Link: - fallthrough - case Image: - fallthrough - case Table: - fallthrough - case TableHead: - fallthrough - case TableBody: - fallthrough - case TableRow: - fallthrough - case TableCell: - return true - default: - return false - } -} - -// IsLeaf returns true if 'n' is a leaf node. -func (n *Node) IsLeaf() bool { - return !n.IsContainer() -} - -func (n *Node) canContain(t NodeType) bool { - if n.Type == List { - return t == Item - } - if n.Type == Document || n.Type == BlockQuote || n.Type == Item { - return t != Item - } - if n.Type == Table { - return t == TableHead || t == TableBody - } - if n.Type == TableHead || n.Type == TableBody { - return t == TableRow - } - if n.Type == TableRow { - return t == TableCell - } - return false -} - -// WalkStatus allows NodeVisitor to have some control over the tree traversal. -// It is returned from NodeVisitor and different values allow Node.Walk to -// decide which node to go to next. -type WalkStatus int - -const ( - // GoToNext is the default traversal of every node. - GoToNext WalkStatus = iota - // SkipChildren tells walker to skip all children of current node. - SkipChildren - // Terminate tells walker to terminate the traversal. - Terminate -) - -// NodeVisitor is a callback to be called when traversing the syntax tree. -// Called twice for every node: once with entering=true when the branch is -// first visited, then with entering=false after all the children are done. -type NodeVisitor func(node *Node, entering bool) WalkStatus - -// Walk is a convenience method that instantiates a walker and starts a -// traversal of subtree rooted at n. 
-func (n *Node) Walk(visitor NodeVisitor) { - w := newNodeWalker(n) - for w.current != nil { - status := visitor(w.current, w.entering) - switch status { - case GoToNext: - w.next() - case SkipChildren: - w.entering = false - w.next() - case Terminate: - return - } - } -} - -type nodeWalker struct { - current *Node - root *Node - entering bool -} - -func newNodeWalker(root *Node) *nodeWalker { - return &nodeWalker{ - current: root, - root: root, - entering: true, - } -} - -func (nw *nodeWalker) next() { - if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { - nw.current = nil - return - } - if nw.entering && nw.current.IsContainer() { - if nw.current.FirstChild != nil { - nw.current = nw.current.FirstChild - nw.entering = true - } else { - nw.entering = false - } - } else if nw.current.Next == nil { - nw.current = nw.current.Parent - nw.entering = false - } else { - nw.current = nw.current.Next - nw.entering = true - } -} - -func dump(ast *Node) { - fmt.Println(dumpString(ast)) -} - -func dumpR(ast *Node, depth int) string { - if ast == nil { - return "" - } - indent := bytes.Repeat([]byte("\t"), depth) - content := ast.Literal - if content == nil { - content = ast.content - } - result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) - for n := ast.FirstChild; n != nil; n = n.Next { - result += dumpR(n, depth+1) - } - return result -} - -func dumpString(ast *Node) string { - return dumpR(ast, 0) -} diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go deleted file mode 100644 index 3a220e9424..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/smartypants.go +++ /dev/null @@ -1,457 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// -// SmartyPants rendering -// -// - -package blackfriday - -import ( - "bytes" - "io" -) - -// SPRenderer is a struct containing state of a Smartypants renderer. -type SPRenderer struct { - inSingleQuote bool - inDoubleQuote bool - callbacks [256]smartCallback -} - -func wordBoundary(c byte) bool { - return c == 0 || isspace(c) || ispunct(c) -} - -func tolower(c byte) byte { - if c >= 'A' && c <= 'Z' { - return c - 'A' + 'a' - } - return c -} - -func isdigit(c byte) bool { - return c >= '0' && c <= '9' -} - -func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { - // edge of the buffer is likely to be a tag that we don't get to see, - // so we treat it like text sometimes - - // enumerate all sixteen possibilities for (previousChar, nextChar) - // each can be one of {0, space, punct, other} - switch { - case previousChar == 0 && nextChar == 0: - // context is not any help here, so toggle - *isOpen = !*isOpen - case isspace(previousChar) && nextChar == 0: - // [ "] might be [ "foo...] - *isOpen = true - case ispunct(previousChar) && nextChar == 0: - // [!"] hmm... could be [Run!"] or [("...] 
- *isOpen = false - case /* isnormal(previousChar) && */ nextChar == 0: - // [a"] is probably a close - *isOpen = false - case previousChar == 0 && isspace(nextChar): - // [" ] might be [...foo" ] - *isOpen = false - case isspace(previousChar) && isspace(nextChar): - // [ " ] context is not any help here, so toggle - *isOpen = !*isOpen - case ispunct(previousChar) && isspace(nextChar): - // [!" ] is probably a close - *isOpen = false - case /* isnormal(previousChar) && */ isspace(nextChar): - // [a" ] this is one of the easy cases - *isOpen = false - case previousChar == 0 && ispunct(nextChar): - // ["!] hmm... could be ["$1.95] or ["!...] - *isOpen = false - case isspace(previousChar) && ispunct(nextChar): - // [ "!] looks more like [ "$1.95] - *isOpen = true - case ispunct(previousChar) && ispunct(nextChar): - // [!"!] context is not any help here, so toggle - *isOpen = !*isOpen - case /* isnormal(previousChar) && */ ispunct(nextChar): - // [a"!] is probably a close - *isOpen = false - case previousChar == 0 /* && isnormal(nextChar) */ : - // ["a] is probably an open - *isOpen = true - case isspace(previousChar) /* && isnormal(nextChar) */ : - // [ "a] this is one of the easy cases - *isOpen = true - case ispunct(previousChar) /* && isnormal(nextChar) */ : - // [!"a] is probably an open - *isOpen = true - default: - // [a'b] maybe a contraction? - *isOpen = false - } - - // Note that with the limited lookahead, this non-breaking - // space will also be appended to single double quotes. - if addNBSP && !*isOpen { - out.WriteString("&nbsp;") - } - - out.WriteByte('&') - if *isOpen { - out.WriteByte('l') - } else { - out.WriteByte('r') - } - out.WriteByte(quote) - out.WriteString("quo;") - - if addNBSP && *isOpen { - out.WriteString("&nbsp;") - } - - return true -} - -func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - t1 := tolower(text[1]) - - if t1 == '\'' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { - out.WriteString("&rsquo;") - return 0 - } - - if len(text) >= 3 { - t2 := tolower(text[2]) - - if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && - (len(text) < 4 || wordBoundary(text[3])) { - out.WriteString("&rsquo;") - return 0 - } - } - } - - nextChar := byte(0) - if len(text) > 1 { - nextChar = text[1] - } - if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { - return 0 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 { - t1 := tolower(text[1]) - t2 := tolower(text[2]) - - if t1 == 'c' && t2 == ')' { - out.WriteString("&copy;") - return 2 - } - - if t1 == 'r' && t2 == ')' { - out.WriteString("&reg;") - return 2 - } - - if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { - out.WriteString("&trade;") - return 3 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - if text[1] == '-' { - out.WriteString("&mdash;") - return 1 - } - - if wordBoundary(previousChar) && wordBoundary(text[1]) { - out.WriteString("&ndash;") - return 0 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, 
 text []byte) int { - if len(text) >= 3 && text[1] == '-' && text[2] == '-' { - out.WriteString("&mdash;") - return 2 - } - if len(text) >= 2 && text[1] == '-' { - out.WriteString("&ndash;") - return 1 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { - if bytes.HasPrefix(text, []byte("&quot;")) { - nextChar := byte(0) - if len(text) >= 7 { - nextChar = text[6] - } - if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { - return 5 - } - } - - if bytes.HasPrefix(text, []byte("&#0;")) { - return 3 - } - - out.WriteByte('&') - return 0 -} - -func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { - var quote byte = 'd' - if angledQuotes { - quote = 'a' - } - - return func(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) - } -} - -func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '.' && text[2] == '.' { - out.WriteString("&hellip;") - return 2 - } - - if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' { - out.WriteString("&hellip;") - return 4 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 && text[1] == '`' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b - // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) - // and avoid changing dates like 1/23/2005 into fractions. 
- numEnd := 0 - for len(text) > numEnd && isdigit(text[numEnd]) { - numEnd++ - } - if numEnd == 0 { - out.WriteByte(text[0]) - return 0 - } - denStart := numEnd + 1 - if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { - denStart = numEnd + 3 - } else if len(text) < numEnd+2 || text[numEnd] != '/' { - out.WriteByte(text[0]) - return 0 - } - denEnd := denStart - for len(text) > denEnd && isdigit(text[denEnd]) { - denEnd++ - } - if denEnd == denStart { - out.WriteByte(text[0]) - return 0 - } - if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { - out.WriteString("<sup>") - out.Write(text[:numEnd]) - out.WriteString("</sup>&frasl;<sub>") - out.Write(text[denStart:denEnd]) - out.WriteString("</sub>") - return denEnd - 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - if text[0] == '1' && text[1] == '/' && text[2] == '2' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { - out.WriteString("&frac12;") - return 2 - } - } - - if text[0] == '1' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { - out.WriteString("&frac14;") - return 2 - } - } - - if text[0] == '3' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { - out.WriteString("&frac34;") - return 2 - } - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { - nextChar := byte(0) - if len(text) > 1 { - nextChar = text[1] - } - if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { - out.WriteString("&quot;") - } - - return 0 -} - -func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') -} - -func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') -} - -func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { - i := 0 - - for i < len(text) && text[i] != '>' { - i++ - } - - out.Write(text[:i+1]) - return i -} - -type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int - -// NewSmartypantsRenderer constructs a Smartypants renderer object. 
-func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { - var ( - r SPRenderer - - smartAmpAngled = r.smartAmp(true, false) - smartAmpAngledNBSP = r.smartAmp(true, true) - smartAmpRegular = r.smartAmp(false, false) - smartAmpRegularNBSP = r.smartAmp(false, true) - - addNBSP = flags&SmartypantsQuotesNBSP != 0 - ) - - if flags&SmartypantsAngledQuotes == 0 { - r.callbacks['"'] = r.smartDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpRegular - } else { - r.callbacks['&'] = smartAmpRegularNBSP - } - } else { - r.callbacks['"'] = r.smartAngledDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpAngled - } else { - r.callbacks['&'] = smartAmpAngledNBSP - } - } - r.callbacks['\''] = r.smartSingleQuote - r.callbacks['('] = r.smartParens - if flags&SmartypantsDashes != 0 { - if flags&SmartypantsLatexDashes == 0 { - r.callbacks['-'] = r.smartDash - } else { - r.callbacks['-'] = r.smartDashLatex - } - } - r.callbacks['.'] = r.smartPeriod - if flags&SmartypantsFractions == 0 { - r.callbacks['1'] = r.smartNumber - r.callbacks['3'] = r.smartNumber - } else { - for ch := '1'; ch <= '9'; ch++ { - r.callbacks[ch] = r.smartNumberGeneric - } - } - r.callbacks['<'] = r.smartLeftAngle - r.callbacks['`'] = r.smartBacktick - return &r -} - -// Process is the entry point of the Smartypants renderer. -func (r *SPRenderer) Process(w io.Writer, text []byte) { - mark := 0 - for i := 0; i < len(text); i++ { - if action := r.callbacks[text[i]]; action != nil { - if i > mark { - w.Write(text[mark:i]) - } - previousChar := byte(0) - if i > 0 { - previousChar = text[i-1] - } - var tmp bytes.Buffer - i += action(&tmp, previousChar, text[i:]) - w.Write(tmp.Bytes()) - mark = i + 1 - } - } - if mark < len(text) { - w.Write(text[mark:]) - } -} diff --git a/vendor/github.com/sagikazarmark/locafero/.editorconfig b/vendor/github.com/sagikazarmark/locafero/.editorconfig deleted file mode 100644 index 6f944f5406..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/.editorconfig +++ /dev/null @@ -1,21 +0,0 @@ -root = true - -[*] -charset = utf-8 -end_of_line = lf -indent_size = 4 -indent_style = space -insert_final_newline = true -trim_trailing_whitespace = true - -[{Makefile,*.mk}] -indent_style = tab - -[*.nix] -indent_size = 2 - -[*.go] -indent_style = tab - -[{*.yml,*.yaml}] -indent_size = 2 diff --git a/vendor/github.com/sagikazarmark/locafero/.envrc b/vendor/github.com/sagikazarmark/locafero/.envrc deleted file mode 100644 index 3ce7171a3c..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/.envrc +++ /dev/null @@ -1,4 +0,0 @@ -if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=" -fi -use flake . 
--impure diff --git a/vendor/github.com/sagikazarmark/locafero/.gitignore b/vendor/github.com/sagikazarmark/locafero/.gitignore deleted file mode 100644 index 8f07e60163..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -/.devenv/ -/.direnv/ -/.task/ -/bin/ -/build/ -/tmp/ -/var/ -/vendor/ diff --git a/vendor/github.com/sagikazarmark/locafero/.golangci.yaml b/vendor/github.com/sagikazarmark/locafero/.golangci.yaml deleted file mode 100644 index 829de2a4a0..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/.golangci.yaml +++ /dev/null @@ -1,27 +0,0 @@ -run: - timeout: 10m - -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/sagikazarmark/locafero) - goimports: - local-prefixes: github.com/sagikazarmark/locafero - misspell: - locale: US - nolintlint: - allow-leading-space: false # require machine-readable nolint directives (with no leading space) - allow-unused: false # report any unused nolint directives - require-specific: false # don't require nolint directives to be specific about which linter is being skipped - revive: - confidence: 0 - -linters: - enable: - - gci - - goimports - - misspell - - nolintlint - - revive diff --git a/vendor/github.com/sagikazarmark/locafero/LICENSE b/vendor/github.com/sagikazarmark/locafero/LICENSE deleted file mode 100644 index a70b0f2960..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2023 Márk Sági-Kazár - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
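[Reviewer note, not part of the diff] The hunks below drop the vendored copy of `sagikazarmark/locafero`, an indirect dependency pulled in through `spf13/viper`, which uses it to locate config files. For orientation while reviewing, here is a minimal sketch of the API being removed, reconstructed from the deleted `finder.go`, `file_type.go`, and `helpers.go`; the import path and the concrete paths/names shown are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/sagikazarmark/locafero"
	"github.com/spf13/afero"
)

func main() {
	finder := locafero.Finder{
		// Root directories (starting points) for the search.
		Paths: []string{"/etc/myapp", "."},
		// Entry names to look for; glob syntax (filepath.Match) is supported.
		// NameWithExtensions expands "config" into config.yaml, config.json, ...
		Names: locafero.NameWithExtensions("config", "yaml", "json"),
		// Restrict results to regular files rather than directories.
		Type: locafero.FileTypeFile,
	}

	// Find searches each path on the given afero filesystem, walking with a
	// glob match when the name contains a glob character and stat-ing otherwise.
	results, err := finder.Find(afero.NewOsFs())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(results)
}
```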
diff --git a/vendor/github.com/sagikazarmark/locafero/README.md b/vendor/github.com/sagikazarmark/locafero/README.md deleted file mode 100644 index a48e8e9789..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# Finder library for [Afero](https://github.com/spf13/afero) - -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/locafero/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/locafero/actions/workflows/ci.yaml) -[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/locafero) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) -[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org) - -**Finder library for [Afero](https://github.com/spf13/afero) ported from [go-finder](https://github.com/sagikazarmark/go-finder).** - -> [!WARNING] -> This is an experimental library under development. -> -> **Backwards compatibility is not guaranteed, expect breaking changes.** - -## Installation - -```shell -go get github.com/sagikazarmark/locafero -``` - -## Usage - -Check out the [package example](https://pkg.go.dev/github.com/sagikazarmark/locafero#example-package) on go.dev. - -## Development - -**For an optimal developer experience, it is recommended to install [Nix](https://nixos.org/download.html) and [direnv](https://direnv.net/docs/installation.html).** - -Run the test suite: - -```shell -just test -``` - -## License - -The project is licensed under the [MIT License](LICENSE). diff --git a/vendor/github.com/sagikazarmark/locafero/file_type.go b/vendor/github.com/sagikazarmark/locafero/file_type.go deleted file mode 100644 index 9a9b140233..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/file_type.go +++ /dev/null @@ -1,28 +0,0 @@ -package locafero - -import "io/fs" - -// FileType represents the kind of entries [Finder] can return. -type FileType int - -const ( - FileTypeAll FileType = iota - FileTypeFile - FileTypeDir -) - -func (ft FileType) matchFileInfo(info fs.FileInfo) bool { - switch ft { - case FileTypeAll: - return true - - case FileTypeFile: - return !info.IsDir() - - case FileTypeDir: - return info.IsDir() - - default: - return false - } -} diff --git a/vendor/github.com/sagikazarmark/locafero/finder.go b/vendor/github.com/sagikazarmark/locafero/finder.go deleted file mode 100644 index 754c8b260e..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/finder.go +++ /dev/null @@ -1,165 +0,0 @@ -// Package finder looks for files and directories in an {fs.Fs} filesystem. -package locafero - -import ( - "errors" - "io/fs" - "path/filepath" - "strings" - - "github.com/sourcegraph/conc/iter" - "github.com/spf13/afero" -) - -// Finder looks for files and directories in an [afero.Fs] filesystem. -type Finder struct { - // Paths represents a list of locations that the [Finder] will search in. - // - // They are essentially the root directories or starting points for the search. - // - // Examples: - // - home/user - // - etc - Paths []string - - // Names are specific entries that the [Finder] will look for within the given Paths. 
- // - // It provides the capability to search for entries with depth, - // meaning it can target deeper locations within the directory structure. - // - // It also supports glob syntax (as defined by [filepath.Match]), offering greater flexibility in search patterns. - // - // Examples: - // - config.yaml - // - home/*/config.yaml - // - home/*/config.* - Names []string - - // Type restricts the kind of entries returned by the [Finder]. - // - // This parameter helps in differentiating and filtering out files from directories or vice versa. - Type FileType -} - -// Find looks for files and directories in an [afero.Fs] filesystem. -func (f Finder) Find(fsys afero.Fs) ([]string, error) { - // Arbitrary go routine limit (TODO: make this a parameter) - // pool := pool.NewWithResults[[]string]().WithMaxGoroutines(5).WithErrors().WithFirstError() - - type searchItem struct { - path string - name string - } - - var searchItems []searchItem - - for _, searchPath := range f.Paths { - searchPath := searchPath - - for _, searchName := range f.Names { - searchName := searchName - - searchItems = append(searchItems, searchItem{searchPath, searchName}) - - // pool.Go(func() ([]string, error) { - // // If the name contains any glob character, perform a glob match - // if strings.ContainsAny(searchName, "*?[]\\^") { - // return globWalkSearch(fsys, searchPath, searchName, f.Type) - // } - // - // return statSearch(fsys, searchPath, searchName, f.Type) - // }) - } - } - - // allResults, err := pool.Wait() - // if err != nil { - // return nil, err - // } - - allResults, err := iter.MapErr(searchItems, func(item *searchItem) ([]string, error) { - // If the name contains any glob character, perform a glob match - if strings.ContainsAny(item.name, "*?[]\\^") { - return globWalkSearch(fsys, item.path, item.name, f.Type) - } - - return statSearch(fsys, item.path, item.name, f.Type) - }) - if err != nil { - return nil, err - } - - var results []string - - for _, r := range allResults { - results = append(results, r...) 
- } - - // Sort results in alphabetical order for now - // sort.Strings(results) - - return results, nil -} - -func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { - var results []string - - err := afero.Walk(fsys, searchPath, func(p string, fileInfo fs.FileInfo, err error) error { - if err != nil { - return err - } - - // Skip the root path - if p == searchPath { - return nil - } - - var result error - - // Stop reading subdirectories - // TODO: add depth detection here - if fileInfo.IsDir() && filepath.Dir(p) == searchPath { - result = fs.SkipDir - } - - // Skip unmatching type - if !searchType.matchFileInfo(fileInfo) { - return result - } - - match, err := filepath.Match(searchName, fileInfo.Name()) - if err != nil { - return err - } - - if match { - results = append(results, p) - } - - return result - }) - if err != nil { - return results, err - } - - return results, nil -} - -func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { - filePath := filepath.Join(searchPath, searchName) - - fileInfo, err := fsys.Stat(filePath) - if errors.Is(err, fs.ErrNotExist) { - return nil, nil - } - if err != nil { - return nil, err - } - - // Skip unmatching type - if !searchType.matchFileInfo(fileInfo) { - return nil, nil - } - - return []string{filePath}, nil -} diff --git a/vendor/github.com/sagikazarmark/locafero/flake.lock b/vendor/github.com/sagikazarmark/locafero/flake.lock deleted file mode 100644 index 46d28f805a..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/flake.lock +++ /dev/null @@ -1,273 +0,0 @@ -{ - "nodes": { - "devenv": { - "inputs": { - "flake-compat": "flake-compat", - "nix": "nix", - "nixpkgs": "nixpkgs", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1694097209, - "narHash": "sha256-gQmBjjxeSyySjbh0yQVBKApo2KWIFqqbRUvG+Fa+QpM=", - "owner": "cachix", - "repo": "devenv", - "rev": "7a8e6a91510efe89d8dcb8e43233f93e86f6b189", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib" - }, - "locked": { - "lastModified": 1693611461, - "narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1685518550, - "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "devenv", - "pre-commit-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1660459072, - "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", - "owner": "hercules-ci", 
- "repo": "gitignore.nix", - "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", - "type": "github" - } - }, - "lowdown-src": { - "flake": false, - "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", - "type": "github" - }, - "original": { - "owner": "kristapsdz", - "repo": "lowdown", - "type": "github" - } - }, - "nix": { - "inputs": { - "lowdown-src": "lowdown-src", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1676545802, - "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", - "owner": "domenkozar", - "repo": "nix", - "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "relaxed-flakes", - "repo": "nix", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1678875422, - "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "dir": "lib", - "lastModified": 1693471703, - "narHash": "sha256-0l03ZBL8P1P6z8MaSDS/MvuU8E75rVxe5eE1N6gxeTo=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "3e52e76b70d5508f3cec70b882a29199f4d1ee85", - "type": "github" - }, - "original": { - "dir": "lib", - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1685801374, - "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.05", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1694343207, - "narHash": "sha256-jWi7OwFxU5Owi4k2JmiL1sa/OuBCQtpaAesuj5LXC8w=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "78058d810644f5ed276804ce7ea9e82d92bee293", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, - "locked": { - "lastModified": 1688056373, - "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "type": "github" - } - }, - "root": { - "inputs": { - "devenv": "devenv", 
- "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_2" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/vendor/github.com/sagikazarmark/locafero/flake.nix b/vendor/github.com/sagikazarmark/locafero/flake.nix deleted file mode 100644 index 209ecf2860..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/flake.nix +++ /dev/null @@ -1,47 +0,0 @@ -{ - description = "Finder library for Afero"; - - inputs = { - nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - flake-parts.url = "github:hercules-ci/flake-parts"; - devenv.url = "github:cachix/devenv"; - }; - - outputs = inputs@{ flake-parts, ... }: - flake-parts.lib.mkFlake { inherit inputs; } { - imports = [ - inputs.devenv.flakeModule - ]; - - systems = [ "x86_64-linux" "aarch64-darwin" ]; - - perSystem = { config, self', inputs', pkgs, system, ... }: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - }; - - packages = with pkgs; [ - just - - golangci-lint - ]; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; - - ci_1_20 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_20; - }; - }; - }; - }; - }; -} diff --git a/vendor/github.com/sagikazarmark/locafero/helpers.go b/vendor/github.com/sagikazarmark/locafero/helpers.go deleted file mode 100644 index 05b434481f..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/helpers.go +++ /dev/null @@ -1,41 +0,0 @@ -package locafero - -import "fmt" - -// NameWithExtensions creates a list of names from a base name and a list of extensions. -// -// TODO: find a better name for this function. -func NameWithExtensions(baseName string, extensions ...string) []string { - var names []string - - if baseName == "" { - return names - } - - for _, ext := range extensions { - if ext == "" { - continue - } - - names = append(names, fmt.Sprintf("%s.%s", baseName, ext)) - } - - return names -} - -// NameWithOptionalExtensions creates a list of names from a base name and a list of extensions, -// plus it adds the base name (without any extensions) to the end of the list. -// -// TODO: find a better name for this function. -func NameWithOptionalExtensions(baseName string, extensions ...string) []string { - var names []string - - if baseName == "" { - return names - } - - names = NameWithExtensions(baseName, extensions...) - names = append(names, baseName) - - return names -} diff --git a/vendor/github.com/sagikazarmark/locafero/justfile b/vendor/github.com/sagikazarmark/locafero/justfile deleted file mode 100644 index 00a88850cc..0000000000 --- a/vendor/github.com/sagikazarmark/locafero/justfile +++ /dev/null @@ -1,11 +0,0 @@ -default: - just --list - -test: - go test -race -v ./... 
- -lint: - golangci-lint run - -fmt: - golangci-lint run --fix diff --git a/vendor/github.com/sagikazarmark/slog-shim/.editorconfig b/vendor/github.com/sagikazarmark/slog-shim/.editorconfig deleted file mode 100644 index 1fb0e1bec6..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/.editorconfig +++ /dev/null @@ -1,18 +0,0 @@ -root = true - -[*] -charset = utf-8 -end_of_line = lf -indent_size = 4 -indent_style = space -insert_final_newline = true -trim_trailing_whitespace = true - -[*.nix] -indent_size = 2 - -[{Makefile,*.mk}] -indent_style = tab - -[Taskfile.yaml] -indent_size = 2 diff --git a/vendor/github.com/sagikazarmark/slog-shim/.envrc b/vendor/github.com/sagikazarmark/slog-shim/.envrc deleted file mode 100644 index 3ce7171a3c..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/.envrc +++ /dev/null @@ -1,4 +0,0 @@ -if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=" -fi -use flake . --impure diff --git a/vendor/github.com/sagikazarmark/slog-shim/.gitignore b/vendor/github.com/sagikazarmark/slog-shim/.gitignore deleted file mode 100644 index dc6d8b5875..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/.devenv/ -/.direnv/ -/.task/ -/build/ diff --git a/vendor/github.com/sagikazarmark/slog-shim/LICENSE b/vendor/github.com/sagikazarmark/slog-shim/LICENSE deleted file mode 100644 index 6a66aea5ea..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
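[Reviewer note, not part of the diff] The `sagikazarmark/slog-shim` source files removed below all follow one pattern: each exported surface is defined twice, in a pair of files guarded by opposite build tags, with type aliases pointing at either the standard library or its x/exp predecessor. A condensed sketch of the go1.21 side, based on the deleted attr.go (the `!go1.21` twin is identical except for the import):

```go
//go:build go1.21

// Package slog mirrors the log/slog API; on Go 1.21+ every name is a
// thin alias or wrapper over the standard library implementation.
package slog

import "log/slog"

// An Attr is a key-value pair.
type Attr = slog.Attr

// String returns an Attr for a string value.
func String(key, value string) Attr {
	return slog.String(key, value)
}
```

The `!go1.21` variant swaps the import for `golang.org/x/exp/slog`, so downstream code keeps a single import path that resolves to the right implementation at build time.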
diff --git a/vendor/github.com/sagikazarmark/slog-shim/README.md b/vendor/github.com/sagikazarmark/slog-shim/README.md deleted file mode 100644 index 1f5be85e10..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# [slog](https://pkg.go.dev/log/slog) shim - -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/slog-shim/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/slog-shim/actions/workflows/ci.yaml) -[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/slog-shim) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) -[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org) - -Go 1.21 introduced a [new structured logging package](https://golang.org/doc/go1.21#slog), `log/slog`, to the standard library. -Although it's been eagerly anticipated by many, widespread adoption isn't expected to occur immediately, -especially since updating to Go 1.21 is a decision that most libraries won't make overnight. - -Before this package was added to the standard library, there was an _experimental_ version available at [golang.org/x/exp/slog](https://pkg.go.dev/golang.org/x/exp/slog). -While it's generally advised against using experimental packages in production, -this one served as a sort of backport package for the last few years, -incorporating new features before they were added to the standard library (like `slices`, `maps` or `errors`). - -This package serves as a bridge, helping libraries integrate slog in a backward-compatible way without having to immediately update their Go version requirement to 1.21. On Go 1.21 (and above), it acts as a drop-in replacement for `log/slog`, while below 1.21 it falls back to `golang.org/x/exp/slog`. - -**How does it achieve backwards compatibility?** - -Although there's no consensus on whether dropping support for older Go versions is considered backward compatible, a majority seems to believe it is. -(I don't have scientific proof for this, but it's based on conversations with various individuals across different channels.) - -This package adheres to that interpretation of backward compatibility. On Go 1.21, the shim uses type aliases to offer the same API as `log/slog`. -Once a library upgrades its version requirement to Go 1.21, it should be able to discard this shim and use `log/slog` directly. - -For older Go versions, the library might become unstable after removing the shim. -However, since those older versions are no longer supported, the promise of backward compatibility remains intact. - -## Installation - -```shell -go get github.com/sagikazarmark/slog-shim -``` - -## Usage - -Import this package into your library and use it in your public API: - -```go -package mylib - -import slog "github.com/sagikazarmark/slog-shim" - -func New(logger *slog.Logger) MyLib { - // ... 
-} -``` - -When using the library, clients can either use `log/slog` (when on Go 1.21) or `golang.org/x/exp/slog` (below Go 1.21): - -```go -package main - -import "log/slog" - -// OR - -import "golang.org/x/exp/slog" - -mylib.New(slog.Default()) -``` - -**Make sure consumers are aware that your API behaves differently on different Go versions.** - -Once you bump your Go version requirement to Go 1.21, you can drop the shim entirely from your code: - -```diff -package mylib - -- import slog "github.com/sagikazarmark/slog-shim" -+ import "log/slog" - -func New(logger *slog.Logger) MyLib { - // ... -} -``` - -## License - -The project is licensed under a [BSD-style license](LICENSE). diff --git a/vendor/github.com/sagikazarmark/slog-shim/attr.go b/vendor/github.com/sagikazarmark/slog-shim/attr.go deleted file mode 100644 index 89608bf3a7..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/attr.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" - "time" -) - -// An Attr is a key-value pair. -type Attr = slog.Attr - -// String returns an Attr for a string value. -func String(key, value string) Attr { - return slog.String(key, value) -} - -// Int64 returns an Attr for an int64. -func Int64(key string, value int64) Attr { - return slog.Int64(key, value) -} - -// Int converts an int to an int64 and returns -// an Attr with that value. -func Int(key string, value int) Attr { - return slog.Int(key, value) -} - -// Uint64 returns an Attr for a uint64. -func Uint64(key string, v uint64) Attr { - return slog.Uint64(key, v) -} - -// Float64 returns an Attr for a floating-point number. -func Float64(key string, v float64) Attr { - return slog.Float64(key, v) -} - -// Bool returns an Attr for a bool. -func Bool(key string, v bool) Attr { - return slog.Bool(key, v) -} - -// Time returns an Attr for a time.Time. -// It discards the monotonic portion. -func Time(key string, v time.Time) Attr { - return slog.Time(key, v) -} - -// Duration returns an Attr for a time.Duration. -func Duration(key string, v time.Duration) Attr { - return slog.Duration(key, v) -} - -// Group returns an Attr for a Group Value. -// The first argument is the key; the remaining arguments -// are converted to Attrs as in [Logger.Log]. -// -// Use Group to collect several key-value pairs under a single -// key on a log line, or as the result of LogValue -// in order to log a single value as multiple Attrs. -func Group(key string, args ...any) Attr { - return slog.Group(key, args...) -} - -// Any returns an Attr for the supplied value. -// See [Value.AnyValue] for how values are treated. -func Any(key string, value any) Attr { - return slog.Any(key, value) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/attr_120.go b/vendor/github.com/sagikazarmark/slog-shim/attr_120.go deleted file mode 100644 index b664813331..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/attr_120.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "time" - - "golang.org/x/exp/slog" -) - -// An Attr is a key-value pair. 
-type Attr = slog.Attr - -// String returns an Attr for a string value. -func String(key, value string) Attr { - return slog.String(key, value) -} - -// Int64 returns an Attr for an int64. -func Int64(key string, value int64) Attr { - return slog.Int64(key, value) -} - -// Int converts an int to an int64 and returns -// an Attr with that value. -func Int(key string, value int) Attr { - return slog.Int(key, value) -} - -// Uint64 returns an Attr for a uint64. -func Uint64(key string, v uint64) Attr { - return slog.Uint64(key, v) -} - -// Float64 returns an Attr for a floating-point number. -func Float64(key string, v float64) Attr { - return slog.Float64(key, v) -} - -// Bool returns an Attr for a bool. -func Bool(key string, v bool) Attr { - return slog.Bool(key, v) -} - -// Time returns an Attr for a time.Time. -// It discards the monotonic portion. -func Time(key string, v time.Time) Attr { - return slog.Time(key, v) -} - -// Duration returns an Attr for a time.Duration. -func Duration(key string, v time.Duration) Attr { - return slog.Duration(key, v) -} - -// Group returns an Attr for a Group Value. -// The first argument is the key; the remaining arguments -// are converted to Attrs as in [Logger.Log]. -// -// Use Group to collect several key-value pairs under a single -// key on a log line, or as the result of LogValue -// in order to log a single value as multiple Attrs. -func Group(key string, args ...any) Attr { - return slog.Group(key, args...) -} - -// Any returns an Attr for the supplied value. -// See [Value.AnyValue] for how values are treated. -func Any(key string, value any) Attr { - return slog.Any(key, value) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/flake.lock b/vendor/github.com/sagikazarmark/slog-shim/flake.lock deleted file mode 100644 index 7e8898e9e3..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/flake.lock +++ /dev/null @@ -1,273 +0,0 @@ -{ - "nodes": { - "devenv": { - "inputs": { - "flake-compat": "flake-compat", - "nix": "nix", - "nixpkgs": "nixpkgs", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1694097209, - "narHash": "sha256-gQmBjjxeSyySjbh0yQVBKApo2KWIFqqbRUvG+Fa+QpM=", - "owner": "cachix", - "repo": "devenv", - "rev": "7a8e6a91510efe89d8dcb8e43233f93e86f6b189", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib" - }, - "locked": { - "lastModified": 1693611461, - "narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1685518550, - "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": 
"flake-utils", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "devenv", - "pre-commit-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1660459072, - "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", - "owner": "hercules-ci", - "repo": "gitignore.nix", - "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", - "type": "github" - } - }, - "lowdown-src": { - "flake": false, - "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", - "type": "github" - }, - "original": { - "owner": "kristapsdz", - "repo": "lowdown", - "type": "github" - } - }, - "nix": { - "inputs": { - "lowdown-src": "lowdown-src", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1676545802, - "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", - "owner": "domenkozar", - "repo": "nix", - "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "relaxed-flakes", - "repo": "nix", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1678875422, - "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "dir": "lib", - "lastModified": 1693471703, - "narHash": "sha256-0l03ZBL8P1P6z8MaSDS/MvuU8E75rVxe5eE1N6gxeTo=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "3e52e76b70d5508f3cec70b882a29199f4d1ee85", - "type": "github" - }, - "original": { - "dir": "lib", - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1685801374, - "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.05", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1694345580, - "narHash": "sha256-BbG0NUxQTz1dN/Y87yPWZc/0Kp/coJ0vM3+7sNa5kUM=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "f002de6834fdde9c864f33c1ec51da7df19cd832", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "master", - "repo": "nixpkgs", - "type": "github" - } - }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, - "locked": { - "lastModified": 1688056373, - "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", - 
"owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "type": "github" - } - }, - "root": { - "inputs": { - "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_2" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/flake.nix b/vendor/github.com/sagikazarmark/slog-shim/flake.nix deleted file mode 100644 index 7239bbc2ec..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/flake.nix +++ /dev/null @@ -1,57 +0,0 @@ -{ - inputs = { - # nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - nixpkgs.url = "github:NixOS/nixpkgs/master"; - flake-parts.url = "github:hercules-ci/flake-parts"; - devenv.url = "github:cachix/devenv"; - }; - - outputs = inputs@{ flake-parts, ... }: - flake-parts.lib.mkFlake { inherit inputs; } { - imports = [ - inputs.devenv.flakeModule - ]; - - systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; - - perSystem = { config, self', inputs', pkgs, system, ... }: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - go.package = pkgs.lib.mkDefault pkgs.go_1_21; - }; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; - - ci_1_19 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_19; - }; - }; - - ci_1_20 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_20; - }; - }; - - ci_1_21 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_21; - }; - }; - }; - }; - }; -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/handler.go b/vendor/github.com/sagikazarmark/slog-shim/handler.go deleted file mode 100644 index f55556ae18..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/handler.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" -) - -// A Handler handles log records produced by a Logger.. -// -// A typical handler may print log records to standard error, -// or write them to a file or database, or perhaps augment them -// with additional attributes and pass them on to another handler. -// -// Any of the Handler's methods may be called concurrently with itself -// or with other methods. It is the responsibility of the Handler to -// manage this concurrency. -// -// Users of the slog package should not invoke Handler methods directly. -// They should use the methods of [Logger] instead. -type Handler = slog.Handler - -// HandlerOptions are options for a TextHandler or JSONHandler. -// A zero HandlerOptions consists entirely of default values. -type HandlerOptions = slog.HandlerOptions - -// Keys for "built-in" attributes. 
-const ( - // TimeKey is the key used by the built-in handlers for the time - // when the log method is called. The associated Value is a [time.Time]. - TimeKey = slog.TimeKey - // LevelKey is the key used by the built-in handlers for the level - // of the log call. The associated value is a [Level]. - LevelKey = slog.LevelKey - // MessageKey is the key used by the built-in handlers for the - // message of the log call. The associated value is a string. - MessageKey = slog.MessageKey - // SourceKey is the key used by the built-in handlers for the source file - // and line of the log call. The associated value is a string. - SourceKey = slog.SourceKey -) diff --git a/vendor/github.com/sagikazarmark/slog-shim/handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/handler_120.go deleted file mode 100644 index 670057573f..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/handler_120.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "golang.org/x/exp/slog" -) - -// A Handler handles log records produced by a Logger.. -// -// A typical handler may print log records to standard error, -// or write them to a file or database, or perhaps augment them -// with additional attributes and pass them on to another handler. -// -// Any of the Handler's methods may be called concurrently with itself -// or with other methods. It is the responsibility of the Handler to -// manage this concurrency. -// -// Users of the slog package should not invoke Handler methods directly. -// They should use the methods of [Logger] instead. -type Handler = slog.Handler - -// HandlerOptions are options for a TextHandler or JSONHandler. -// A zero HandlerOptions consists entirely of default values. -type HandlerOptions = slog.HandlerOptions - -// Keys for "built-in" attributes. -const ( - // TimeKey is the key used by the built-in handlers for the time - // when the log method is called. The associated Value is a [time.Time]. - TimeKey = slog.TimeKey - // LevelKey is the key used by the built-in handlers for the level - // of the log call. The associated value is a [Level]. - LevelKey = slog.LevelKey - // MessageKey is the key used by the built-in handlers for the - // message of the log call. The associated value is a string. - MessageKey = slog.MessageKey - // SourceKey is the key used by the built-in handlers for the source file - // and line of the log call. The associated value is a string. - SourceKey = slog.SourceKey -) diff --git a/vendor/github.com/sagikazarmark/slog-shim/json_handler.go b/vendor/github.com/sagikazarmark/slog-shim/json_handler.go deleted file mode 100644 index 7c22bd81e4..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/json_handler.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "io" - "log/slog" -) - -// JSONHandler is a Handler that writes Records to an io.Writer as -// line-delimited JSON objects. -type JSONHandler = slog.JSONHandler - -// NewJSONHandler creates a JSONHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. 
-func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler { - return slog.NewJSONHandler(w, opts) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go deleted file mode 100644 index 7b14f10ba9..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "io" - - "golang.org/x/exp/slog" -) - -// JSONHandler is a Handler that writes Records to an io.Writer as -// line-delimited JSON objects. -type JSONHandler = slog.JSONHandler - -// NewJSONHandler creates a JSONHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. -func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler { - return slog.NewJSONHandler(w, opts) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/level.go b/vendor/github.com/sagikazarmark/slog-shim/level.go deleted file mode 100644 index 07288cf891..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/level.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" -) - -// A Level is the importance or severity of a log event. -// The higher the level, the more important or severe the event. -type Level = slog.Level - -// Level numbers are inherently arbitrary, -// but we picked them to satisfy three constraints. -// Any system can map them to another numbering scheme if it wishes. -// -// First, we wanted the default level to be Info, Since Levels are ints, Info is -// the default value for int, zero. -// -// Second, we wanted to make it easy to use levels to specify logger verbosity. -// Since a larger level means a more severe event, a logger that accepts events -// with smaller (or more negative) level means a more verbose logger. Logger -// verbosity is thus the negation of event severity, and the default verbosity -// of 0 accepts all events at least as severe as INFO. -// -// Third, we wanted some room between levels to accommodate schemes with named -// levels between ours. For example, Google Cloud Logging defines a Notice level -// between Info and Warn. Since there are only a few of these intermediate -// levels, the gap between the numbers need not be large. Our gap of 4 matches -// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the -// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog -// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog -// does not. But those OpenTelemetry levels can still be represented as slog -// Levels by using the appropriate integers. -// -// Names for common levels. -const ( - LevelDebug Level = slog.LevelDebug - LevelInfo Level = slog.LevelInfo - LevelWarn Level = slog.LevelWarn - LevelError Level = slog.LevelError -) - -// A LevelVar is a Level variable, to allow a Handler level to change -// dynamically. -// It implements Leveler as well as a Set method, -// and it is safe for use by multiple goroutines. -// The zero LevelVar corresponds to LevelInfo. 
-type LevelVar = slog.LevelVar - -// A Leveler provides a Level value. -// -// As Level itself implements Leveler, clients typically supply -// a Level value wherever a Leveler is needed, such as in HandlerOptions. -// Clients who need to vary the level dynamically can provide a more complex -// Leveler implementation such as *LevelVar. -type Leveler = slog.Leveler diff --git a/vendor/github.com/sagikazarmark/slog-shim/level_120.go b/vendor/github.com/sagikazarmark/slog-shim/level_120.go deleted file mode 100644 index d3feb94203..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/level_120.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "golang.org/x/exp/slog" -) - -// A Level is the importance or severity of a log event. -// The higher the level, the more important or severe the event. -type Level = slog.Level - -// Level numbers are inherently arbitrary, -// but we picked them to satisfy three constraints. -// Any system can map them to another numbering scheme if it wishes. -// -// First, we wanted the default level to be Info, Since Levels are ints, Info is -// the default value for int, zero. -// -// Second, we wanted to make it easy to use levels to specify logger verbosity. -// Since a larger level means a more severe event, a logger that accepts events -// with smaller (or more negative) level means a more verbose logger. Logger -// verbosity is thus the negation of event severity, and the default verbosity -// of 0 accepts all events at least as severe as INFO. -// -// Third, we wanted some room between levels to accommodate schemes with named -// levels between ours. For example, Google Cloud Logging defines a Notice level -// between Info and Warn. Since there are only a few of these intermediate -// levels, the gap between the numbers need not be large. Our gap of 4 matches -// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the -// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog -// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog -// does not. But those OpenTelemetry levels can still be represented as slog -// Levels by using the appropriate integers. -// -// Names for common levels. -const ( - LevelDebug Level = slog.LevelDebug - LevelInfo Level = slog.LevelInfo - LevelWarn Level = slog.LevelWarn - LevelError Level = slog.LevelError -) - -// A LevelVar is a Level variable, to allow a Handler level to change -// dynamically. -// It implements Leveler as well as a Set method, -// and it is safe for use by multiple goroutines. -// The zero LevelVar corresponds to LevelInfo. -type LevelVar = slog.LevelVar - -// A Leveler provides a Level value. -// -// As Level itself implements Leveler, clients typically supply -// a Level value wherever a Leveler is needed, such as in HandlerOptions. -// Clients who need to vary the level dynamically can provide a more complex -// Leveler implementation such as *LevelVar. -type Leveler = slog.Leveler diff --git a/vendor/github.com/sagikazarmark/slog-shim/logger.go b/vendor/github.com/sagikazarmark/slog-shim/logger.go deleted file mode 100644 index e80036bec5..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/logger.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "context" - "log" - "log/slog" -) - -// Default returns the default Logger. -func Default() *Logger { return slog.Default() } - -// SetDefault makes l the default Logger. -// After this call, output from the log package's default Logger -// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler. -func SetDefault(l *Logger) { - slog.SetDefault(l) -} - -// A Logger records structured information about each call to its -// Log, Debug, Info, Warn, and Error methods. -// For each call, it creates a Record and passes it to a Handler. -// -// To create a new Logger, call [New] or a Logger method -// that begins "With". -type Logger = slog.Logger - -// New creates a new Logger with the given non-nil Handler. -func New(h Handler) *Logger { - return slog.New(h) -} - -// With calls Logger.With on the default logger. -func With(args ...any) *Logger { - return slog.With(args...) -} - -// NewLogLogger returns a new log.Logger such that each call to its Output method -// dispatches a Record to the specified handler. The logger acts as a bridge from -// the older log API to newer structured logging handlers. -func NewLogLogger(h Handler, level Level) *log.Logger { - return slog.NewLogLogger(h, level) -} - -// Debug calls Logger.Debug on the default logger. -func Debug(msg string, args ...any) { - slog.Debug(msg, args...) -} - -// DebugContext calls Logger.DebugContext on the default logger. -func DebugContext(ctx context.Context, msg string, args ...any) { - slog.DebugContext(ctx, msg, args...) -} - -// Info calls Logger.Info on the default logger. -func Info(msg string, args ...any) { - slog.Info(msg, args...) -} - -// InfoContext calls Logger.InfoContext on the default logger. -func InfoContext(ctx context.Context, msg string, args ...any) { - slog.InfoContext(ctx, msg, args...) -} - -// Warn calls Logger.Warn on the default logger. -func Warn(msg string, args ...any) { - slog.Warn(msg, args...) -} - -// WarnContext calls Logger.WarnContext on the default logger. -func WarnContext(ctx context.Context, msg string, args ...any) { - slog.WarnContext(ctx, msg, args...) -} - -// Error calls Logger.Error on the default logger. -func Error(msg string, args ...any) { - slog.Error(msg, args...) -} - -// ErrorContext calls Logger.ErrorContext on the default logger. -func ErrorContext(ctx context.Context, msg string, args ...any) { - slog.ErrorContext(ctx, msg, args...) -} - -// Log calls Logger.Log on the default logger. -func Log(ctx context.Context, level Level, msg string, args ...any) { - slog.Log(ctx, level, msg, args...) -} - -// LogAttrs calls Logger.LogAttrs on the default logger. -func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { - slog.LogAttrs(ctx, level, msg, attrs...) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/logger_120.go b/vendor/github.com/sagikazarmark/slog-shim/logger_120.go deleted file mode 100644 index 97ebdd5e1c..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/logger_120.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "context" - "log" - - "golang.org/x/exp/slog" -) - -// Default returns the default Logger. 
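Because every symbol in the logger file above is an alias or a thin forwarder, code written against the shim reads exactly like code written against log/slog. A usage sketch, assuming the module's published import path github.com/sagikazarmark/slog-shim (the vendored paths in this diff go through a github.com mirror):

```go
package main

import (
	"os"

	slog "github.com/sagikazarmark/slog-shim"
)

func main() {
	// NewJSONHandler and HandlerOptions forward to log/slog on Go 1.21+
	// and to golang.org/x/exp/slog on Go 1.19/1.20.
	h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})
	slog.SetDefault(slog.New(h))

	// The package-level helpers dispatch to the default logger.
	slog.Info("build finished", "artifacts", 3)
	slog.Debug("verbose detail", "step", "link")
}
```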
-func Default() *Logger { return slog.Default() } - -// SetDefault makes l the default Logger. -// After this call, output from the log package's default Logger -// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler. -func SetDefault(l *Logger) { - slog.SetDefault(l) -} - -// A Logger records structured information about each call to its -// Log, Debug, Info, Warn, and Error methods. -// For each call, it creates a Record and passes it to a Handler. -// -// To create a new Logger, call [New] or a Logger method -// that begins "With". -type Logger = slog.Logger - -// New creates a new Logger with the given non-nil Handler. -func New(h Handler) *Logger { - return slog.New(h) -} - -// With calls Logger.With on the default logger. -func With(args ...any) *Logger { - return slog.With(args...) -} - -// NewLogLogger returns a new log.Logger such that each call to its Output method -// dispatches a Record to the specified handler. The logger acts as a bridge from -// the older log API to newer structured logging handlers. -func NewLogLogger(h Handler, level Level) *log.Logger { - return slog.NewLogLogger(h, level) -} - -// Debug calls Logger.Debug on the default logger. -func Debug(msg string, args ...any) { - slog.Debug(msg, args...) -} - -// DebugContext calls Logger.DebugContext on the default logger. -func DebugContext(ctx context.Context, msg string, args ...any) { - slog.DebugContext(ctx, msg, args...) -} - -// Info calls Logger.Info on the default logger. -func Info(msg string, args ...any) { - slog.Info(msg, args...) -} - -// InfoContext calls Logger.InfoContext on the default logger. -func InfoContext(ctx context.Context, msg string, args ...any) { - slog.InfoContext(ctx, msg, args...) -} - -// Warn calls Logger.Warn on the default logger. -func Warn(msg string, args ...any) { - slog.Warn(msg, args...) -} - -// WarnContext calls Logger.WarnContext on the default logger. -func WarnContext(ctx context.Context, msg string, args ...any) { - slog.WarnContext(ctx, msg, args...) -} - -// Error calls Logger.Error on the default logger. -func Error(msg string, args ...any) { - slog.Error(msg, args...) -} - -// ErrorContext calls Logger.ErrorContext on the default logger. -func ErrorContext(ctx context.Context, msg string, args ...any) { - slog.ErrorContext(ctx, msg, args...) -} - -// Log calls Logger.Log on the default logger. -func Log(ctx context.Context, level Level, msg string, args ...any) { - slog.Log(ctx, level, msg, args...) -} - -// LogAttrs calls Logger.LogAttrs on the default logger. -func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { - slog.LogAttrs(ctx, level, msg, attrs...) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/record.go b/vendor/github.com/sagikazarmark/slog-shim/record.go deleted file mode 100644 index 85ad1f7842..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/record.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" - "time" -) - -// A Record holds information about a log event. -// Copies of a Record share state. -// Do not modify a Record after handing out a copy to it. -// Call [NewRecord] to create a new Record. -// Use [Record.Clone] to create a copy with no shared state. -type Record = slog.Record - -// NewRecord creates a Record from the given arguments. 
-// Use [Record.AddAttrs] to add attributes to the Record. -// -// NewRecord is intended for logging APIs that want to support a [Handler] as -// a backend. -func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record { - return slog.NewRecord(t, level, msg, pc) -} - -// Source describes the location of a line of source code. -type Source = slog.Source diff --git a/vendor/github.com/sagikazarmark/slog-shim/record_120.go b/vendor/github.com/sagikazarmark/slog-shim/record_120.go deleted file mode 100644 index c2eaf4e796..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/record_120.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "time" - - "golang.org/x/exp/slog" -) - -// A Record holds information about a log event. -// Copies of a Record share state. -// Do not modify a Record after handing out a copy to it. -// Call [NewRecord] to create a new Record. -// Use [Record.Clone] to create a copy with no shared state. -type Record = slog.Record - -// NewRecord creates a Record from the given arguments. -// Use [Record.AddAttrs] to add attributes to the Record. -// -// NewRecord is intended for logging APIs that want to support a [Handler] as -// a backend. -func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record { - return slog.NewRecord(t, level, msg, pc) -} - -// Source describes the location of a line of source code. -type Source = slog.Source diff --git a/vendor/github.com/sagikazarmark/slog-shim/text_handler.go b/vendor/github.com/sagikazarmark/slog-shim/text_handler.go deleted file mode 100644 index 45f6cfcba5..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/text_handler.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "io" - "log/slog" -) - -// TextHandler is a Handler that writes Records to an io.Writer as a -// sequence of key=value pairs separated by spaces and followed by a newline. -type TextHandler = slog.TextHandler - -// NewTextHandler creates a TextHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. -func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler { - return slog.NewTextHandler(w, opts) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go deleted file mode 100644 index a69d63ccea..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "io" - - "golang.org/x/exp/slog" -) - -// TextHandler is a Handler that writes Records to an io.Writer as a -// sequence of key=value pairs separated by spaces and followed by a newline. -type TextHandler = slog.TextHandler - -// NewTextHandler creates a TextHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. 
-func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler { - return slog.NewTextHandler(w, opts) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/value.go b/vendor/github.com/sagikazarmark/slog-shim/value.go deleted file mode 100644 index 61173eb946..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/value.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" - "time" -) - -// A Value can represent any Go value, but unlike type any, -// it can represent most small values without an allocation. -// The zero Value corresponds to nil. -type Value = slog.Value - -// Kind is the kind of a Value. -type Kind = slog.Kind - -// The following list is sorted alphabetically, but it's also important that -// KindAny is 0 so that a zero Value represents nil. -const ( - KindAny = slog.KindAny - KindBool = slog.KindBool - KindDuration = slog.KindDuration - KindFloat64 = slog.KindFloat64 - KindInt64 = slog.KindInt64 - KindString = slog.KindString - KindTime = slog.KindTime - KindUint64 = slog.KindUint64 - KindGroup = slog.KindGroup - KindLogValuer = slog.KindLogValuer -) - -//////////////// Constructors - -// StringValue returns a new Value for a string. -func StringValue(value string) Value { - return slog.StringValue(value) -} - -// IntValue returns a Value for an int. -func IntValue(v int) Value { - return slog.IntValue(v) -} - -// Int64Value returns a Value for an int64. -func Int64Value(v int64) Value { - return slog.Int64Value(v) -} - -// Uint64Value returns a Value for a uint64. -func Uint64Value(v uint64) Value { - return slog.Uint64Value(v) -} - -// Float64Value returns a Value for a floating-point number. -func Float64Value(v float64) Value { - return slog.Float64Value(v) -} - -// BoolValue returns a Value for a bool. -func BoolValue(v bool) Value { - return slog.BoolValue(v) -} - -// TimeValue returns a Value for a time.Time. -// It discards the monotonic portion. -func TimeValue(v time.Time) Value { - return slog.TimeValue(v) -} - -// DurationValue returns a Value for a time.Duration. -func DurationValue(v time.Duration) Value { - return slog.DurationValue(v) -} - -// GroupValue returns a new Value for a list of Attrs. -// The caller must not subsequently mutate the argument slice. -func GroupValue(as ...Attr) Value { - return slog.GroupValue(as...) -} - -// AnyValue returns a Value for the supplied value. -// -// If the supplied value is of type Value, it is returned -// unmodified. -// -// Given a value of one of Go's predeclared string, bool, or -// (non-complex) numeric types, AnyValue returns a Value of kind -// String, Bool, Uint64, Int64, or Float64. The width of the -// original numeric type is not preserved. -// -// Given a time.Time or time.Duration value, AnyValue returns a Value of kind -// KindTime or KindDuration. The monotonic time is not preserved. -// -// For nil, or values of all other types, including named types whose -// underlying type is numeric, AnyValue returns a value of kind KindAny. -func AnyValue(v any) Value { - return slog.AnyValue(v) -} - -// A LogValuer is any Go value that can convert itself into a Value for logging. -// -// This mechanism may be used to defer expensive operations until they are -// needed, or to expand a single value into a sequence of components. 
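The LogValuer comment above is the hook for deferring or rewriting values at log time. A small sketch against log/slog directly (identical through the shim, since the types are aliases); the token type is invented for illustration:

```go
package main

import (
	"log/slog"
	"os"
)

// token implements slog.LogValuer, so a handler never sees the raw
// secret: LogValue is consulted only when a record is actually emitted.
type token string

func (t token) LogValue() slog.Value {
	return slog.GroupValue(slog.Bool("redacted", true), slog.Int("len", len(t)))
}

func main() {
	l := slog.New(slog.NewTextHandler(os.Stderr, nil))
	l.Info("auth", "token", token("s3cr3t"))
	// Prints token.redacted=true token.len=6; the plaintext never
	// reaches the handler.
}
```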
-type LogValuer = slog.LogValuer diff --git a/vendor/github.com/sagikazarmark/slog-shim/value_120.go b/vendor/github.com/sagikazarmark/slog-shim/value_120.go deleted file mode 100644 index 0f9f871eee..0000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/value_120.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "time" - - "golang.org/x/exp/slog" -) - -// A Value can represent any Go value, but unlike type any, -// it can represent most small values without an allocation. -// The zero Value corresponds to nil. -type Value = slog.Value - -// Kind is the kind of a Value. -type Kind = slog.Kind - -// The following list is sorted alphabetically, but it's also important that -// KindAny is 0 so that a zero Value represents nil. -const ( - KindAny = slog.KindAny - KindBool = slog.KindBool - KindDuration = slog.KindDuration - KindFloat64 = slog.KindFloat64 - KindInt64 = slog.KindInt64 - KindString = slog.KindString - KindTime = slog.KindTime - KindUint64 = slog.KindUint64 - KindGroup = slog.KindGroup - KindLogValuer = slog.KindLogValuer -) - -//////////////// Constructors - -// StringValue returns a new Value for a string. -func StringValue(value string) Value { - return slog.StringValue(value) -} - -// IntValue returns a Value for an int. -func IntValue(v int) Value { - return slog.IntValue(v) -} - -// Int64Value returns a Value for an int64. -func Int64Value(v int64) Value { - return slog.Int64Value(v) -} - -// Uint64Value returns a Value for a uint64. -func Uint64Value(v uint64) Value { - return slog.Uint64Value(v) -} - -// Float64Value returns a Value for a floating-point number. -func Float64Value(v float64) Value { - return slog.Float64Value(v) -} - -// BoolValue returns a Value for a bool. -func BoolValue(v bool) Value { - return slog.BoolValue(v) -} - -// TimeValue returns a Value for a time.Time. -// It discards the monotonic portion. -func TimeValue(v time.Time) Value { - return slog.TimeValue(v) -} - -// DurationValue returns a Value for a time.Duration. -func DurationValue(v time.Duration) Value { - return slog.DurationValue(v) -} - -// GroupValue returns a new Value for a list of Attrs. -// The caller must not subsequently mutate the argument slice. -func GroupValue(as ...Attr) Value { - return slog.GroupValue(as...) -} - -// AnyValue returns a Value for the supplied value. -// -// If the supplied value is of type Value, it is returned -// unmodified. -// -// Given a value of one of Go's predeclared string, bool, or -// (non-complex) numeric types, AnyValue returns a Value of kind -// String, Bool, Uint64, Int64, or Float64. The width of the -// original numeric type is not preserved. -// -// Given a time.Time or time.Duration value, AnyValue returns a Value of kind -// KindTime or KindDuration. The monotonic time is not preserved. -// -// For nil, or values of all other types, including named types whose -// underlying type is numeric, AnyValue returns a value of kind KindAny. -func AnyValue(v any) Value { - return slog.AnyValue(v) -} - -// A LogValuer is any Go value that can convert itself into a Value for logging. -// -// This mechanism may be used to defer expensive operations until they are -// needed, or to expand a single value into a sequence of components. 
-type LogValuer = slog.LogValuer diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE b/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE deleted file mode 100644 index e51324f9b5..0000000000 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2021 NYU Secure Systems Lab - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go deleted file mode 100644 index 037a718abe..0000000000 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go +++ /dev/null @@ -1,290 +0,0 @@ -// Package encrypted provides a simple, secure system for encrypting data -// symmetrically with a passphrase. -// -// It uses scrypt derive a key from the passphrase and the NaCl secret box -// cipher for authenticated encryption. -package encrypted - -import ( - "crypto/rand" - "encoding/json" - "errors" - "fmt" - "io" - - "golang.org/x/crypto/nacl/secretbox" - "golang.org/x/crypto/scrypt" -) - -const saltSize = 32 - -const ( - boxKeySize = 32 - boxNonceSize = 24 -) - -// KDFParameterStrength defines the KDF parameter strength level to be used for -// encryption key derivation. -type KDFParameterStrength uint8 - -const ( - // Legacy defines legacy scrypt parameters (N:2^15, r:8, p:1) - Legacy KDFParameterStrength = iota + 1 - // Standard defines standard scrypt parameters which is focusing 100ms of computation (N:2^16, r:8, p:1) - Standard - // OWASP defines OWASP recommended scrypt parameters (N:2^17, r:8, p:1) - OWASP -) - -var ( - // legacyParams represents old scrypt derivation parameters for backward - // compatibility. - legacyParams = scryptParams{ - N: 32768, // 2^15 - R: 8, - P: 1, - } - - // standardParams defines scrypt parameters based on the scrypt creator - // recommendation to limit key derivation in time boxed to 100ms. - standardParams = scryptParams{ - N: 65536, // 2^16 - R: 8, - P: 1, - } - - // owaspParams defines scrypt parameters recommended by OWASP - owaspParams = scryptParams{ - N: 131072, // 2^17 - R: 8, - P: 1, - } - - // defaultParams defines scrypt parameters which will be used to generate a - // new key. 
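For scale: scrypt's working set is commonly approximated as 128·r·N bytes, which is how the three parameter levels above trade memory for attack resistance. That approximation comes from the scrypt literature, not from this file; a quick check of the arithmetic:

```go
package main

import "fmt"

func main() {
	// Approximate scrypt working-set size: 128 * r * N bytes.
	for _, p := range []struct {
		name string
		n, r int
	}{
		{"Legacy   (N=2^15)", 1 << 15, 8},
		{"Standard (N=2^16)", 1 << 16, 8},
		{"OWASP    (N=2^17)", 1 << 17, 8},
	} {
		fmt.Printf("%s ~%d MiB\n", p.name, 128*p.r*p.n/(1<<20))
	}
	// Output: ~32 MiB, ~64 MiB, and ~128 MiB respectively.
}
```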
- defaultParams = standardParams -) - -const ( - nameScrypt = "scrypt" - nameSecretBox = "nacl/secretbox" -) - -type data struct { - KDF scryptKDF `json:"kdf"` - Cipher secretBoxCipher `json:"cipher"` - Ciphertext []byte `json:"ciphertext"` -} - -type scryptParams struct { - N int `json:"N"` - R int `json:"r"` - P int `json:"p"` -} - -func (sp *scryptParams) Equal(in *scryptParams) bool { - return in != nil && sp.N == in.N && sp.P == in.P && sp.R == in.R -} - -func newScryptKDF(level KDFParameterStrength) (scryptKDF, error) { - salt := make([]byte, saltSize) - if err := fillRandom(salt); err != nil { - return scryptKDF{}, fmt.Errorf("unable to generate a random salt: %w", err) - } - - var params scryptParams - switch level { - case Legacy: - params = legacyParams - case Standard: - params = standardParams - case OWASP: - params = owaspParams - default: - // Fallback to default parameters - params = defaultParams - } - - return scryptKDF{ - Name: nameScrypt, - Params: params, - Salt: salt, - }, nil -} - -type scryptKDF struct { - Name string `json:"name"` - Params scryptParams `json:"params"` - Salt []byte `json:"salt"` -} - -func (s *scryptKDF) Key(passphrase []byte) ([]byte, error) { - return scrypt.Key(passphrase, s.Salt, s.Params.N, s.Params.R, s.Params.P, boxKeySize) -} - -// CheckParams checks that the encoded KDF parameters are what we expect them to -// be. If we do not do this, an attacker could cause a DoS by tampering with -// them. -func (s *scryptKDF) CheckParams() error { - switch { - case legacyParams.Equal(&s.Params): - case standardParams.Equal(&s.Params): - case owaspParams.Equal(&s.Params): - default: - return errors.New("unsupported scrypt parameters") - } - - return nil -} - -func newSecretBoxCipher() (secretBoxCipher, error) { - nonce := make([]byte, boxNonceSize) - if err := fillRandom(nonce); err != nil { - return secretBoxCipher{}, err - } - return secretBoxCipher{ - Name: nameSecretBox, - Nonce: nonce, - }, nil -} - -type secretBoxCipher struct { - Name string `json:"name"` - Nonce []byte `json:"nonce"` - - encrypted bool -} - -func (s *secretBoxCipher) Encrypt(plaintext, key []byte) []byte { - var keyBytes [boxKeySize]byte - var nonceBytes [boxNonceSize]byte - - if len(key) != len(keyBytes) { - panic("incorrect key size") - } - if len(s.Nonce) != len(nonceBytes) { - panic("incorrect nonce size") - } - - copy(keyBytes[:], key) - copy(nonceBytes[:], s.Nonce) - - // ensure that we don't re-use nonces - if s.encrypted { - panic("Encrypt must only be called once for each cipher instance") - } - s.encrypted = true - - return secretbox.Seal(nil, plaintext, &nonceBytes, &keyBytes) -} - -func (s *secretBoxCipher) Decrypt(ciphertext, key []byte) ([]byte, error) { - var keyBytes [boxKeySize]byte - var nonceBytes [boxNonceSize]byte - - if len(key) != len(keyBytes) { - panic("incorrect key size") - } - if len(s.Nonce) != len(nonceBytes) { - // return an error instead of panicking since the nonce is user input - return nil, errors.New("encrypted: incorrect nonce size") - } - - copy(keyBytes[:], key) - copy(nonceBytes[:], s.Nonce) - - res, ok := secretbox.Open(nil, ciphertext, &nonceBytes, &keyBytes) - if !ok { - return nil, errors.New("encrypted: decryption failed") - } - return res, nil -} - -// Encrypt takes a passphrase and plaintext, and returns a JSON object -// containing ciphertext and the details necessary to decrypt it. 
-func Encrypt(plaintext, passphrase []byte) ([]byte, error) { - return EncryptWithCustomKDFParameters(plaintext, passphrase, Standard) -} - -// EncryptWithCustomKDFParameters takes a passphrase, the plaintext and a KDF -// parameter level (Legacy, Standard, or OWASP), and returns a JSON object -// containing ciphertext and the details necessary to decrypt it. -func EncryptWithCustomKDFParameters(plaintext, passphrase []byte, kdfLevel KDFParameterStrength) ([]byte, error) { - k, err := newScryptKDF(kdfLevel) - if err != nil { - return nil, err - } - key, err := k.Key(passphrase) - if err != nil { - return nil, err - } - - c, err := newSecretBoxCipher() - if err != nil { - return nil, err - } - - data := &data{ - KDF: k, - Cipher: c, - } - data.Ciphertext = c.Encrypt(plaintext, key) - - return json.Marshal(data) -} - -// Marshal encrypts the JSON encoding of v using passphrase. -func Marshal(v interface{}, passphrase []byte) ([]byte, error) { - return MarshalWithCustomKDFParameters(v, passphrase, Standard) -} - -// MarshalWithCustomKDFParameters encrypts the JSON encoding of v using passphrase. -func MarshalWithCustomKDFParameters(v interface{}, passphrase []byte, kdfLevel KDFParameterStrength) ([]byte, error) { - data, err := json.MarshalIndent(v, "", "\t") - if err != nil { - return nil, err - } - return EncryptWithCustomKDFParameters(data, passphrase, kdfLevel) -} - -// Decrypt takes a JSON-encoded ciphertext object encrypted using Encrypt and -// tries to decrypt it using passphrase. If successful, it returns the -// plaintext. -func Decrypt(ciphertext, passphrase []byte) ([]byte, error) { - data := &data{} - if err := json.Unmarshal(ciphertext, data); err != nil { - return nil, err - } - - if data.KDF.Name != nameScrypt { - return nil, fmt.Errorf("encrypted: unknown kdf name %q", data.KDF.Name) - } - if data.Cipher.Name != nameSecretBox { - return nil, fmt.Errorf("encrypted: unknown cipher name %q", data.Cipher.Name) - } - if err := data.KDF.CheckParams(); err != nil { - return nil, err - } - - key, err := data.KDF.Key(passphrase) - if err != nil { - return nil, err - } - - return data.Cipher.Decrypt(data.Ciphertext, key) -} - -// Unmarshal decrypts the data using passphrase and unmarshals the resulting -// plaintext into the value pointed to by v. -func Unmarshal(data []byte, v interface{}, passphrase []byte) error { - decrypted, err := Decrypt(data, passphrase) - if err != nil { - return err - } - return json.Unmarshal(decrypted, v) -} - -func fillRandom(b []byte) error { - _, err := io.ReadFull(rand.Reader, b) - return err -} diff --git a/vendor/github.com/sigstore/cosign/v2/COPYRIGHT.txt b/vendor/github.com/sigstore/cosign/v2/COPYRIGHT.txt deleted file mode 100644 index 7a01c84986..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/COPYRIGHT.txt +++ /dev/null @@ -1,14 +0,0 @@ - -Copyright 2021 The Sigstore Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
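End to end, the encrypted package deleted above amounts to a four-call API. A round-trip sketch against its real import path, github.com/secure-systems-lab/go-securesystemslib/encrypted (the passphrase and message are placeholders):

```go
package main

import (
	"fmt"

	"github.com/secure-systems-lab/go-securesystemslib/encrypted"
)

func main() {
	passphrase := []byte("correct horse battery staple")

	// Encrypt emits a self-describing JSON envelope: KDF name, scrypt
	// parameters and salt, cipher name and nonce, plus the ciphertext.
	blob, err := encrypted.Encrypt([]byte("hello"), passphrase)
	if err != nil {
		panic(err)
	}

	// Decrypt re-derives the key from the embedded parameters, after
	// CheckParams has rejected anything but the three known sets.
	plain, err := encrypted.Decrypt(blob, passphrase)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain)) // hello
}
```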
diff --git a/vendor/github.com/sigstore/cosign/v2/LICENSE b/vendor/github.com/sigstore/cosign/v2/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/sigstore/cosign/v2/internal/pkg/oci/remote/remote.go b/vendor/github.com/sigstore/cosign/v2/internal/pkg/oci/remote/remote.go deleted file mode 100644 index 250a5c0d27..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/internal/pkg/oci/remote/remote.go +++ /dev/null @@ -1,25 +0,0 @@ -// -// Copyright 2023 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "fmt" -) - -// ArtifactType converts a attachment name (sig/sbom/att/etc.) into a valid artifactType (OCI 1.1+). -func ArtifactType(attName string) string { - return fmt.Sprintf("application/vnd.dev.cosign.artifact.%s.v1+json", attName) -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/rekor.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/rekor.go deleted file mode 100644 index 5331302051..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/rekor.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2022 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bundle - -import ( - "github.com/sigstore/rekor/pkg/generated/models" -) - -// RekorBundle holds metadata about recording a Signature's ephemeral key to -// a Rekor transparency log. 
-type RekorBundle struct { - SignedEntryTimestamp []byte - Payload RekorPayload -} - -type RekorPayload struct { - Body interface{} `json:"body"` - IntegratedTime int64 `json:"integratedTime"` - LogIndex int64 `json:"logIndex"` - LogID string `json:"logID"` -} - -func EntryToBundle(entry *models.LogEntryAnon) *RekorBundle { - if entry.Verification == nil { - return nil - } - return &RekorBundle{ - SignedEntryTimestamp: entry.Verification.SignedEntryTimestamp, - Payload: RekorPayload{ - Body: entry.Body, - IntegratedTime: *entry.IntegratedTime, - LogIndex: *entry.LogIndex, - LogID: *entry.LogID, - }, - } -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/tsa.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/tsa.go deleted file mode 100644 index bbb846759b..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/tsa.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2022 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bundle - -// RFC3161Timestamp holds metadata about timestamp RFC3161 verification data. -type RFC3161Timestamp struct { - // SignedRFC3161Timestamp contains a DER encoded TimeStampResponse. - // See https://www.rfc-editor.org/rfc/rfc3161.html#section-2.4.2 - // Clients MUST verify the hashed message in the message imprint, - // typically using the artifact signature. - SignedRFC3161Timestamp []byte -} - -// TimestampToRFC3161Timestamp receives a base64 encoded RFC3161 timestamp. -func TimestampToRFC3161Timestamp(timestampRFC3161 []byte) *RFC3161Timestamp { - if timestampRFC3161 != nil { - return &RFC3161Timestamp{ - SignedRFC3161Timestamp: timestampRFC3161, - } - } - return nil -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/env/env.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/env/env.go deleted file mode 100644 index 5c26d4f516..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/env/env.go +++ /dev/null @@ -1,247 +0,0 @@ -// -// Copyright 2022 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
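The rekor.go and tsa.go deletions above carry plain data types; the only logic is EntryToBundle's nil-guard on Verification. A sketch of the conversion, with placeholder values, and with field names of the rekor models package stated here as an assumption from its generated API:

```go
package main

import (
	"fmt"

	"github.com/sigstore/cosign/v2/pkg/cosign/bundle"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	t, idx, id := int64(1700000000), int64(42), "c0ffee"

	entry := &models.LogEntryAnon{
		Body:           "eyJhcGlWZXJzaW9uIjo...", // base64 entry body, truncated placeholder
		IntegratedTime: &t,
		LogIndex:       &idx,
		LogID:          &id,
		Verification: &models.LogEntryAnonVerification{
			SignedEntryTimestamp: []byte("placeholder-SET"),
		},
	}

	// A nil Verification would yield a nil bundle; otherwise the entry
	// is flattened into the RekorBundle stored next to a signature.
	b := bundle.EntryToBundle(entry)
	fmt.Println(b.Payload.LogIndex, b.Payload.LogID) // 42 c0ffee
}
```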
- -package env - -import ( - "fmt" - "os" - "strings" -) - -// Variable is a type representing an environment variable -type Variable string - -// VariableOpts closely describes a Variable -type VariableOpts struct { - // Description contains description for the environment variable - Description string - // Expects describes what value is expected by the environment variable - Expects string - // Sensitive is used for environment variables with sensitive values - // (e.g. passwords, credentials, etc.) - Sensitive bool - // External is used for environment variables coming from external projects - // and dependencies (e.g. GITHUB_TOKEN, SIGSTORE_, TUF_) - External bool -} - -func (v Variable) String() string { - return string(v) -} - -const ( - // Cosign environment variables - VariableExperimental Variable = "COSIGN_EXPERIMENTAL" - VariableDockerMediaTypes Variable = "COSIGN_DOCKER_MEDIA_TYPES" - VariablePassword Variable = "COSIGN_PASSWORD" - VariablePKCS11Pin Variable = "COSIGN_PKCS11_PIN" - VariablePKCS11ModulePath Variable = "COSIGN_PKCS11_MODULE_PATH" - VariablePKCS11IgnoreCertificate Variable = "COSIGN_PKCS11_IGNORE_CERTIFICATE" - VariableRepository Variable = "COSIGN_REPOSITORY" - - // Sigstore environment variables - VariableSigstoreCTLogPublicKeyFile Variable = "SIGSTORE_CT_LOG_PUBLIC_KEY_FILE" - VariableSigstoreRootFile Variable = "SIGSTORE_ROOT_FILE" - VariableSigstoreRekorPublicKey Variable = "SIGSTORE_REKOR_PUBLIC_KEY" - VariableSigstoreIDToken Variable = "SIGSTORE_ID_TOKEN" //nolint:gosec - - // Other external environment variables - VariableGitHubHost Variable = "GITHUB_HOST" - VariableGitHubToken Variable = "GITHUB_TOKEN" //nolint:gosec - VariableGitHubRequestToken Variable = "ACTIONS_ID_TOKEN_REQUEST_TOKEN" - VariableGitHubRequestURL Variable = "ACTIONS_ID_TOKEN_REQUEST_URL" - VariableSPIFFEEndpointSocket Variable = "SPIFFE_ENDPOINT_SOCKET" - VariableGoogleServiceAccountName Variable = "GOOGLE_SERVICE_ACCOUNT_NAME" - VariableGitLabHost Variable = "GITLAB_HOST" - VariableGitLabToken Variable = "GITLAB_TOKEN" - VariableBuildkiteAgentAccessToken Variable = "BUILDKITE_AGENT_ACCESS_TOKEN" - VariableBuildkiteAgentEndpoint Variable = "BUILDKITE_AGENT_ENDPOINT" - VariableBuildkiteJobID Variable = "BUILDKITE_JOB_ID" - VariableBuildkiteAgentLogLevel Variable = "BUILDKITE_AGENT_LOG_LEVEL" - VariableSourceDateEpoch Variable = "SOURCE_DATE_EPOCH" -) - -var ( - // NB: this is intentionally private to avoid anyone changing this from - // code. There's a getter function used to get this slice if needed. 
- environmentVariables = map[Variable]VariableOpts{ - VariableExperimental: { - Description: "enables experimental cosign features", - Expects: "1 if experimental features should be enabled (0 by default)", - Sensitive: false, - }, - VariableDockerMediaTypes: { - Description: "to be used with registries that do not support OCI media types", - Expects: "1 to fallback to legacy OCI media types equivalents (0 by default)", - Sensitive: false, - }, - VariablePassword: { - Description: "overrides password inputs with this value", - Expects: "string with a password (asks on stdin by default)", - Sensitive: true, - }, - VariablePKCS11Pin: { - Description: "to be used if PKCS11 PIN is not provided", - Expects: "string with a PIN", - Sensitive: true, - }, - VariablePKCS11ModulePath: { - Description: "is PKCS11 module-path", - Expects: "string with a module-path", - Sensitive: false, - }, - VariablePKCS11IgnoreCertificate: { - Description: "disables loading certificates with PKCS11", - Expects: "1 if loading certificates should be disabled (0 by default)", - Sensitive: false, - }, - VariableRepository: { - Description: "can be used to store signatures in an alternate location", - Expects: "string with a repository", - Sensitive: false, - }, - - VariableSigstoreCTLogPublicKeyFile: { - Description: "overrides what is used to validate the SCT coming back from Fulcio", - Expects: "path to the public key file", - Sensitive: false, - External: true, - }, - VariableSigstoreRootFile: { - Description: "overrides the public good instance root CA", - Expects: "path to the root CA", - Sensitive: false, - External: true, - }, - VariableSigstoreRekorPublicKey: { - Description: "if specified, you can specify an oob Public Key that Rekor uses", - Expects: "path to the public key", - Sensitive: false, - External: true, - }, - - VariableGitHubHost: { - Description: "is URL of the GitHub Enterprise instance", - Expects: "string with the URL of GitHub Enterprise instance", - Sensitive: false, - External: true, - }, - VariableGitHubToken: { - Description: "is a token used to authenticate with GitHub", - Expects: "token generated on GitHub", - Sensitive: true, - External: true, - }, - VariableGitHubRequestToken: { - Description: "is bearer token for the request to the OIDC provider", - Expects: "string with a bearer token", - Sensitive: true, - External: true, - }, - VariableGitHubRequestURL: { - Description: "is the URL for GitHub's OIDC provider", - Expects: "string with the URL for the OIDC provider", - Sensitive: false, - External: true, - }, - VariableSPIFFEEndpointSocket: { - Description: "allows you to specify non-default SPIFFE socket to use.", - Expects: "string with SPIFFE socket path", - Sensitive: false, - External: true, - }, - VariableGoogleServiceAccountName: { - Description: "is a service account name to be used with the Google provider", - Expects: "string with the service account's name", - Sensitive: false, - External: true, - }, - VariableGitLabHost: { - Description: "is URL of the GitLab instance", - Expects: "string with the URL of GitLab instance", - Sensitive: false, - External: true, - }, - VariableGitLabToken: { - Description: "is a token used to authenticate with GitLab", - Expects: "string with a token", - Sensitive: true, - External: true, - }, - VariableBuildkiteAgentAccessToken: { - Description: "is an access token used to identify the Buildkite agent", - Expects: "string with an access token", - Sensitive: true, - External: true, - }, - VariableBuildkiteAgentEndpoint: { - Description: "the 
Buildkite agent endpoint", - Expects: "string with an endpoint", - Sensitive: false, - External: true, - }, - VariableBuildkiteJobID: { - Description: "the Buildkite job ID to claim in the OIDC token", - Expects: "string with a job ID", - Sensitive: false, - External: true, - }, - VariableBuildkiteAgentLogLevel: { - Description: "the log level for the Buildkite agent", - Expects: "string with log level, either debug, notice, info, error, warn, fatal (default: notice)", - Sensitive: false, - External: true, - }, - VariableSigstoreIDToken: { - Description: "is a OIDC token used to authenticate to Fulcio", - Expects: "string with a OIDC token", - Sensitive: true, - External: true, - }, - VariableSourceDateEpoch: { - Description: "overrides current time for reproducible builds, see https://reproducible-builds.org/docs/source-date-epoch/", - Expects: "number of seconds since unix epoch", - Sensitive: false, - External: true, - }, - } -) - -func EnvironmentVariables() map[Variable]VariableOpts { - return environmentVariables -} - -func mustRegisterEnv(name Variable) { - opts, ok := environmentVariables[name] - if !ok { - panic(fmt.Sprintf("environment variable %q is not registered in pkg/cosign/env", name.String())) - } - if !opts.External && !strings.HasPrefix(name.String(), "COSIGN_") { - panic(fmt.Sprintf("cosign environment variable %q must start with COSIGN_ prefix", name.String())) - } -} - -func Getenv(name Variable) string { - mustRegisterEnv(name) - - return os.Getenv(name.String()) //nolint:forbidigo -} - -func LookupEnv(name Variable) (string, bool) { - mustRegisterEnv(name) - - return os.LookupEnv(name.String()) //nolint:forbidigo -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/doc.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/doc.go deleted file mode 100644 index af68616b64..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package oci holds functions and types intended to align and compose with -// github.com/google/go-containerregistry. -package oci diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/empty/empty.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/empty/empty.go deleted file mode 100644 index 599ad08f8e..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/empty/empty.go +++ /dev/null @@ -1,48 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package empty - -import ( - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/empty" - "github.com/google/go-containerregistry/pkg/v1/mutate" - "github.com/google/go-containerregistry/pkg/v1/types" - - "github.com/sigstore/cosign/v2/pkg/oci" -) - -// Signatures constructs an empty oci.Signatures. -func Signatures() oci.Signatures { - base := empty.Image - if !oci.DockerMediaTypes() { - base = mutate.MediaType(base, types.OCIManifestSchema1) - base = mutate.ConfigMediaType(base, types.OCIConfigJSON) - } - return &emptyImage{ - Image: base, - } -} - -type emptyImage struct { - v1.Image -} - -var _ oci.Signatures = (*emptyImage)(nil) - -// Get implements oci.Signatures -func (*emptyImage) Get() ([]oci.Signature, error) { - return nil, nil -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/empty/signed.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/empty/signed.go deleted file mode 100644 index 9847e128c4..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/empty/signed.go +++ /dev/null @@ -1,70 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package empty - -import ( - "errors" - "fmt" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/empty" - "github.com/sigstore/cosign/v2/pkg/oci" -) - -type signedImage struct { - v1.Image - digest v1.Hash - signature oci.Signatures - attestations oci.Signatures -} - -func (se *signedImage) Signatures() (oci.Signatures, error) { - return se.signature, nil -} - -func (se *signedImage) Attestations() (oci.Signatures, error) { - return se.attestations, nil -} - -func (se *signedImage) Attachment(name string) (oci.File, error) { //nolint: revive - return nil, errors.New("no attachments") -} - -func (se *signedImage) Digest() (v1.Hash, error) { - if se.digest.Hex == "" { - return v1.Hash{}, fmt.Errorf("digest not available") - } - return se.digest, nil -} - -func SignedImage(ref name.Reference) (oci.SignedImage, error) { - var err error - d := v1.Hash{} - base := empty.Image - if digest, ok := ref.(name.Digest); ok { - d, err = v1.NewHash(digest.DigestStr()) - if err != nil { - return nil, err - } - } - return &signedImage{ - Image: base, - digest: d, - signature: Signatures(), - attestations: Signatures(), - }, nil -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/file.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/file.go deleted file mode 100644 index 2d354ff975..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/file.go +++ /dev/null @@ -1,30 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. 
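[Reviewer note, illustrative only.] empty.SignedImage above seeds its digest from a by-digest reference, which is why Digest() can answer without hitting a registry. A small sketch (the placeholder digest is hypothetical, just 64 valid hex characters):

    package main

    import (
        "fmt"
        "strings"

        "github.com/google/go-containerregistry/pkg/name"
        "github.com/sigstore/cosign/v2/pkg/oci/empty"
    )

    func main() {
        ref, err := name.ParseReference("example.com/repo@sha256:" + strings.Repeat("a", 64))
        if err != nil {
            panic(err)
        }
        se, err := empty.SignedImage(ref)
        if err != nil {
            panic(err)
        }
        d, err := se.Digest() // populated from the by-digest reference, no network
        if err != nil {
            panic(err)
        }
        fmt.Println(d)
    }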
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oci - -import "github.com/google/go-containerregistry/pkg/v1/types" - -// File is a degenerate form of SignedImage that stores a single file as a v1.Layer -type File interface { - SignedImage - - // FileMediaType retrieves the media type of the File - FileMediaType() (types.MediaType, error) - - // Payload fetches the opaque data that is being signed. - // This will always return data when there is no error. - Payload() ([]byte, error) -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/image.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/image.go deleted file mode 100644 index 2504ca43a5..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/image.go +++ /dev/null @@ -1,25 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oci - -import v1 "github.com/google/go-containerregistry/pkg/v1" - -// SignedImage represents an OCI Image, complemented with accessors -// for retrieving signed metadata associated with that image. -type SignedImage interface { - v1.Image - SignedEntity -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/index.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/index.go deleted file mode 100644 index cf7331e60c..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/index.go +++ /dev/null @@ -1,33 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oci - -import v1 "github.com/google/go-containerregistry/pkg/v1" - -// SignedIndex represents an OCI ImageIndex, complemented with accessors -// for retrieving signed metadata associated with that ImageIndex. -type SignedImageIndex interface { - v1.ImageIndex - SignedEntity - - // SignedImage is the same as Image, but provides accessors for the nested - // image's signed metadata. 
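[Reviewer note, illustrative only.] The File interface above is how cosign models attachments (SBOMs and the like) as single-layer images. A sketch of a hypothetical helper reading one back through a SignedEntity:

    package example

    import (
        "fmt"

        "github.com/sigstore/cosign/v2/pkg/oci"
    )

    // printAttachment fetches a named attachment (e.g. "sbom") and reports
    // its media type and size via the File accessors.
    func printAttachment(se oci.SignedEntity, name string) error {
        f, err := se.Attachment(name)
        if err != nil {
            return err // no such attachment
        }
        mt, err := f.FileMediaType()
        if err != nil {
            return err
        }
        payload, err := f.Payload()
        if err != nil {
            return err
        }
        fmt.Printf("%s: %d bytes of %s\n", name, len(payload), mt)
        return nil
    }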
- SignedImage(v1.Hash) (SignedImage, error) - - // SignedImageIndex is the same as ImageIndex, but provides accessors for - // the nested image index's signed metadata. - SignedImageIndex(v1.Hash) (SignedImageIndex, error) -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/interface.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/interface.go deleted file mode 100644 index 9d5bb9a0fe..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/interface.go +++ /dev/null @@ -1,36 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oci - -import v1 "github.com/google/go-containerregistry/pkg/v1" - -type SignedEntity interface { - // Digest returns the sha256 of this image's manifest. - Digest() (v1.Hash, error) - - // Signatures returns the set of signatures currently associated with this - // entity, or the empty equivalent if none are found. - Signatures() (Signatures, error) - - // Attestations returns the set of attestations currently associated with this - // entity, or the empty equivalent if none are found. - // Attestations are just like a Signature, but they do not contain - // Base64Signature because it's baked into the payload. - Attestations() (Signatures, error) - - // Attachment returns a named entity associated with this entity, or error if not found. - Attachment(name string) (File, error) -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/internal/signature/layer.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/internal/signature/layer.go deleted file mode 100644 index 251d3edc09..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/internal/signature/layer.go +++ /dev/null @@ -1,141 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
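[Reviewer note, illustrative only.] SignedEntity is the common denominator the rest of this vendor tree programs against. A sketch of a hypothetical helper that works for images and indices alike:

    package example

    import (
        "fmt"

        "github.com/sigstore/cosign/v2/pkg/oci"
    )

    // dumpSignatures prints one line per signature attached to any entity,
    // using only the interface methods defined above.
    func dumpSignatures(se oci.SignedEntity) error {
        sigs, err := se.Signatures()
        if err != nil {
            return err
        }
        sl, err := sigs.Get()
        if err != nil {
            return err
        }
        for i, s := range sl {
            b64, err := s.Base64Signature()
            if err != nil {
                return err
            }
            fmt.Printf("sig[%d]: %.16s...\n", i, b64)
        }
        return nil
    }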
- -package signature - -import ( - "crypto/x509" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "strings" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/sigstore/cosign/v2/pkg/cosign/bundle" - "github.com/sigstore/cosign/v2/pkg/oci" - "github.com/sigstore/sigstore/pkg/cryptoutils" -) - -const ( - sigkey = "dev.cosignproject.cosign/signature" - certkey = "dev.sigstore.cosign/certificate" - chainkey = "dev.sigstore.cosign/chain" - BundleKey = "dev.sigstore.cosign/bundle" - RFC3161TimestampKey = "dev.sigstore.cosign/rfc3161timestamp" -) - -type sigLayer struct { - v1.Layer - desc v1.Descriptor -} - -func New(l v1.Layer, desc v1.Descriptor) oci.Signature { - return &sigLayer{ - Layer: l, - desc: desc, - } -} - -var _ oci.Signature = (*sigLayer)(nil) - -// Annotations implements oci.Signature -func (s *sigLayer) Annotations() (map[string]string, error) { - return s.desc.Annotations, nil -} - -// Payload implements oci.Signature -func (s *sigLayer) Payload() ([]byte, error) { - // Compressed is a misnomer here, we just want the raw bytes from the registry. - r, err := s.Layer.Compressed() - if err != nil { - return nil, err - } - payload, err := io.ReadAll(r) - if err != nil { - return nil, err - } - return payload, nil -} - -// Signature implements oci.Signature -func (s *sigLayer) Signature() ([]byte, error) { - b64sig, err := s.Base64Signature() - if err != nil { - return nil, err - } - return base64.StdEncoding.DecodeString(b64sig) -} - -// Base64Signature implements oci.Signature -func (s *sigLayer) Base64Signature() (string, error) { - b64sig, ok := s.desc.Annotations[sigkey] - if !ok { - return "", fmt.Errorf("signature layer %s is missing %q annotation", s.desc.Digest, sigkey) - } - return b64sig, nil -} - -// Cert implements oci.Signature -func (s *sigLayer) Cert() (*x509.Certificate, error) { - certPEM := s.desc.Annotations[certkey] - if certPEM == "" { - return nil, nil - } - certs, err := cryptoutils.LoadCertificatesFromPEM(strings.NewReader(certPEM)) - if err != nil { - return nil, err - } - return certs[0], nil -} - -// Chain implements oci.Signature -func (s *sigLayer) Chain() ([]*x509.Certificate, error) { - chainPEM := s.desc.Annotations[chainkey] - if chainPEM == "" { - return nil, nil - } - certs, err := cryptoutils.LoadCertificatesFromPEM(strings.NewReader(chainPEM)) - if err != nil { - return nil, err - } - return certs, nil -} - -// Bundle implements oci.Signature -func (s *sigLayer) Bundle() (*bundle.RekorBundle, error) { - val := s.desc.Annotations[BundleKey] - if val == "" { - return nil, nil - } - var b bundle.RekorBundle - if err := json.Unmarshal([]byte(val), &b); err != nil { - return nil, fmt.Errorf("unmarshaling bundle: %w", err) - } - return &b, nil -} - -// RFC3161Timestamp implements oci.Signature -func (s *sigLayer) RFC3161Timestamp() (*bundle.RFC3161Timestamp, error) { - val := s.desc.Annotations[RFC3161TimestampKey] - if val == "" { - return nil, nil - } - var b bundle.RFC3161Timestamp - if err := json.Unmarshal([]byte(val), &b); err != nil { - return nil, fmt.Errorf("unmarshaling RFC3161 timestamp bundle: %w", err) - } - return &b, nil -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mediatypes.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mediatypes.go deleted file mode 100644 index a189047720..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mediatypes.go +++ /dev/null @@ -1,34 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. 
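[Reviewer note, illustrative only.] The sigLayer above surfaces optional material (cert, chain, Rekor bundle, RFC 3161 timestamp) from layer annotations, returning nil rather than an error when absent. A sketch of inspecting one; the bundle field names (Payload.LogIndex) are assumed from the bundle package, which is not shown in this diff:

    package example

    import (
        "fmt"

        "github.com/sigstore/cosign/v2/pkg/oci"
    )

    func describeSignature(s oci.Signature) error {
        if cert, err := s.Cert(); err != nil {
            return err
        } else if cert != nil {
            fmt.Println("certificate subject:", cert.Subject.CommonName)
        }
        if b, err := s.Bundle(); err != nil {
            return err
        } else if b != nil {
            fmt.Println("rekor log index:", b.Payload.LogIndex)
        }
        if ts, err := s.RFC3161Timestamp(); err != nil {
            return err
        } else if ts != nil {
            fmt.Println("has RFC 3161 timestamp")
        }
        return nil
    }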
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oci - -import ( - "strconv" - - "github.com/sigstore/cosign/v2/pkg/cosign/env" -) - -const ( - // Deprecated: use `pkg/cosign/env/VariableDockerMediaTypes` instead. - DockerMediaTypesEnv = env.VariableDockerMediaTypes -) - -func DockerMediaTypes() bool { - if b, err := strconv.ParseBool(env.Getenv(env.VariableDockerMediaTypes)); err == nil { - return b - } - return false -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/map.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/map.go deleted file mode 100644 index 8c31fc1892..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/map.go +++ /dev/null @@ -1,171 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mutate - -import ( - "context" - "errors" - "fmt" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/empty" - "github.com/google/go-containerregistry/pkg/v1/mutate" - "github.com/google/go-containerregistry/pkg/v1/types" - "github.com/sigstore/cosign/v2/pkg/oci" -) - -// Fn is the signature of the callback supplied to Map. -// The oci.SignedEntity is either an oci.SignedImageIndex or an oci.SignedImage. -// This callback is called on oci.SignedImageIndex *before* its children are -// processed with a context that returns IsBeforeChildren(ctx) == true. -// If the images within the SignedImageIndex change after the Before pass, then -// the Fn will be invoked again on the new SignedImageIndex with a context -// that returns IsAfterChildren(ctx) == true. -// If the returned entity is nil, it is filtered from the result of Map. -type Fn func(context.Context, oci.SignedEntity) (oci.SignedEntity, error) - -// ErrSkipChildren is a special error that may be returned from a Mutator -// to skip processing of an index's child entities. -var ErrSkipChildren = errors.New("skip child entities") - -// Map calls `fn` on the signed entity and each of its constituent entities (`SignedImageIndex` -// or `SignedImage`) transitively. -// Any errors returned by an `fn` are returned by `Map`. 
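[Reviewer note, illustrative only.] DockerMediaTypes above is a plain boolean toggle read through the env package, for registries that reject OCI media types. A sketch of the observable behavior:

    package main

    import (
        "fmt"
        "os"

        "github.com/sigstore/cosign/v2/pkg/oci"
    )

    func main() {
        fmt.Println(oci.DockerMediaTypes()) // false: OCI media types by default
        os.Setenv("COSIGN_DOCKER_MEDIA_TYPES", "1")
        fmt.Println(oci.DockerMediaTypes()) // true: fall back to Docker types
    }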
-func Map(ctx context.Context, parent oci.SignedEntity, fn Fn) (oci.SignedEntity, error) { - parent, err := fn(before(ctx), parent) - switch { - case errors.Is(err, ErrSkipChildren): - return parent, nil - case err != nil: - return nil, err - case parent == nil: - // If the function returns nil, it filters it. - return nil, nil - } - - sii, ok := parent.(oci.SignedImageIndex) - if !ok { - return parent, nil - } - im, err := sii.IndexManifest() - if err != nil { - return nil, err - } - - // Track whether any of the child entities change. - changed := false - - adds := []IndexAddendum{} - for _, desc := range im.Manifests { - switch desc.MediaType { - case types.OCIImageIndex, types.DockerManifestList: - x, err := sii.SignedImageIndex(desc.Digest) - if err != nil { - return nil, err - } - - se, err := Map(ctx, x, fn) - if err != nil { - return nil, err - } else if se == nil { - // If the function returns nil, it filters it. - changed = true - continue - } - - changed = changed || (x != se) - adds = append(adds, IndexAddendum{ - Add: se.(oci.SignedImageIndex), // Must be an image index. - Descriptor: v1.Descriptor{ - URLs: desc.URLs, - MediaType: desc.MediaType, - Annotations: desc.Annotations, - Platform: desc.Platform, - }, - }) - - case types.OCIManifestSchema1, types.DockerManifestSchema2: - x, err := sii.SignedImage(desc.Digest) - if err != nil { - return nil, err - } - - se, err := fn(ctx, x) - if err != nil { - return nil, err - } else if se == nil { - // If the function returns nil, it filters it. - changed = true - continue - } - - changed = changed || (x != se) - adds = append(adds, IndexAddendum{ - Add: se.(oci.SignedImage), // Must be an image - Descriptor: v1.Descriptor{ - URLs: desc.URLs, - MediaType: desc.MediaType, - Annotations: desc.Annotations, - Platform: desc.Platform, - }, - }) - - default: - return nil, fmt.Errorf("unknown mime type: %v", desc.MediaType) - } - } - - if !changed { - return parent, nil - } - - // Preserve the key attributes from the base IndexManifest. - e := mutate.IndexMediaType(empty.Index, im.MediaType) - e = mutate.Annotations(e, im.Annotations).(v1.ImageIndex) - - // Construct a new ImageIndex from the new constituent signed images. - result := AppendManifests(e, adds...) - - // Since the children changed, give the callback a crack at the new image index. - return fn(after(ctx), result) -} - -// This is used to associate which pass of the Map a particular -// callback is being invoked for. -type mapPassKey struct{} - -// before decorates the context such that IsBeforeChildren(ctx) is true. -func before(ctx context.Context) context.Context { - return context.WithValue(ctx, mapPassKey{}, "before") -} - -// after decorates the context such that IsAfterChildren(ctx) is true. -func after(ctx context.Context) context.Context { - return context.WithValue(ctx, mapPassKey{}, "after") -} - -// IsBeforeChildren is true within a Mutator when it is called before the children -// have been processed. -func IsBeforeChildren(ctx context.Context) bool { - return ctx.Value(mapPassKey{}) == "before" -} - -// IsAfterChildren is true within a Mutator when it is called after the children -// have been processed; however, this call is only made if the set of children -// changes since the Before call. 
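[Reviewer note, illustrative only.] Map above recurses through an index, rebuilding it only when a child actually changes. A sketch of a read-only traversal; returning the entity unchanged means no index is reconstructed:

    package example

    import (
        "context"

        "github.com/sigstore/cosign/v2/pkg/oci"
        ocimutate "github.com/sigstore/cosign/v2/pkg/oci/mutate"
    )

    // countImages counts the images reachable from an entity (1 for a plain
    // image; all leaves for an index, walked depth-first by Map).
    func countImages(ctx context.Context, se oci.SignedEntity) (int, error) {
        n := 0
        _, err := ocimutate.Map(ctx, se, func(_ context.Context, e oci.SignedEntity) (oci.SignedEntity, error) {
            if _, ok := e.(oci.SignedImage); ok {
                n++
            }
            return e, nil // unchanged: Map will not rebuild the index
        })
        return n, err
    }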
-func IsAfterChildren(ctx context.Context) bool { - return ctx.Value(mapPassKey{}) == "after" -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/mutate.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/mutate.go deleted file mode 100644 index f934cb358d..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/mutate.go +++ /dev/null @@ -1,381 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mutate - -import ( - "errors" - "fmt" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/mutate" - "github.com/sigstore/cosign/v2/pkg/oci" - "github.com/sigstore/cosign/v2/pkg/oci/empty" - "github.com/sigstore/cosign/v2/pkg/oci/signed" -) - -// Appendable is our signed version of mutate.Appendable -type Appendable interface { - oci.SignedEntity - mutate.Appendable -} - -// IndexAddendum is our signed version of mutate.IndexAddendum -type IndexAddendum struct { - Add Appendable - v1.Descriptor -} - -// AppendManifests is a form of mutate.AppendManifests that produces an -// oci.SignedImageIndex. The index itself will contain no signatures, -// but allows access to the contained signed entities. -func AppendManifests(base v1.ImageIndex, adds ...IndexAddendum) oci.SignedImageIndex { - madds := make([]mutate.IndexAddendum, 0, len(adds)) - for _, add := range adds { - madds = append(madds, mutate.IndexAddendum{ - Add: add.Add, - Descriptor: add.Descriptor, - }) - } - return &indexWrapper{ - v1Index: mutate.AppendManifests(base, madds...), - ogbase: base, - addendum: adds, - } -} - -// We alias ImageIndex so that we can inline it without the type -// name colliding with the name of a method it had to implement. 
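[Reviewer note, illustrative only.] AppendManifests above is how Map reassembles an index from its (possibly mutated) children. A sketch of composing one directly; signed.Image wrapping a plain v1.Image mirrors the fallback used by SignedImage below:

    package example

    import (
        v1 "github.com/google/go-containerregistry/pkg/v1"
        "github.com/google/go-containerregistry/pkg/v1/empty"
        "github.com/sigstore/cosign/v2/pkg/oci"
        ocimutate "github.com/sigstore/cosign/v2/pkg/oci/mutate"
        "github.com/sigstore/cosign/v2/pkg/oci/signed"
    )

    // buildIndex composes two per-arch images into a signed image index.
    func buildIndex(amd64, arm64 v1.Image) oci.SignedImageIndex {
        return ocimutate.AppendManifests(empty.Index,
            ocimutate.IndexAddendum{
                Add: signed.Image(amd64),
                Descriptor: v1.Descriptor{
                    Platform: &v1.Platform{OS: "linux", Architecture: "amd64"},
                },
            },
            ocimutate.IndexAddendum{
                Add: signed.Image(arm64),
                Descriptor: v1.Descriptor{
                    Platform: &v1.Platform{OS: "linux", Architecture: "arm64"},
                },
            },
        )
    }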
-type v1Index v1.ImageIndex - -type indexWrapper struct { - v1Index - ogbase v1Index - addendum []IndexAddendum -} - -var _ oci.SignedImageIndex = (*indexWrapper)(nil) - -// Signatures implements oci.SignedImageIndex -func (i *indexWrapper) Signatures() (oci.Signatures, error) { - return empty.Signatures(), nil -} - -// Attestations implements oci.SignedImageIndex -func (i *indexWrapper) Attestations() (oci.Signatures, error) { - return empty.Signatures(), nil -} - -// Attachment implements oci.SignedImage -func (*indexWrapper) Attachment(name string) (oci.File, error) { //nolint: revive - return nil, errors.New("unimplemented") -} - -// SignedImage implements oci.SignedImageIndex -func (i *indexWrapper) SignedImage(h v1.Hash) (oci.SignedImage, error) { - for _, add := range i.addendum { - si, ok := add.Add.(oci.SignedImage) - if !ok { - continue - } - if d, err := si.Digest(); err != nil { - return nil, err - } else if d == h { - return si, nil - } - } - - if sb, ok := i.ogbase.(oci.SignedImageIndex); ok { - return sb.SignedImage(h) - } - - unsigned, err := i.Image(h) - if err != nil { - return nil, err - } - return signed.Image(unsigned), nil -} - -// SignedImageIndex implements oci.SignedImageIndex -func (i *indexWrapper) SignedImageIndex(h v1.Hash) (oci.SignedImageIndex, error) { - for _, add := range i.addendum { - sii, ok := add.Add.(oci.SignedImageIndex) - if !ok { - continue - } - if d, err := sii.Digest(); err != nil { - return nil, err - } else if d == h { - return sii, nil - } - } - - if sb, ok := i.ogbase.(oci.SignedImageIndex); ok { - return sb.SignedImageIndex(h) - } - - unsigned, err := i.ImageIndex(h) - if err != nil { - return nil, err - } - return signed.ImageIndex(unsigned), nil -} - -// AttachSignatureToEntity attaches the provided signature to the provided entity. -func AttachSignatureToEntity(se oci.SignedEntity, sig oci.Signature, opts ...SignOption) (oci.SignedEntity, error) { - switch obj := se.(type) { - case oci.SignedImage: - return AttachSignatureToImage(obj, sig, opts...) - case oci.SignedImageIndex: - return AttachSignatureToImageIndex(obj, sig, opts...) - default: - return AttachSignatureToUnknown(obj, sig, opts...) - } -} - -// AttachAttestationToEntity attaches the provided attestation to the provided entity. -func AttachAttestationToEntity(se oci.SignedEntity, att oci.Signature, opts ...SignOption) (oci.SignedEntity, error) { - switch obj := se.(type) { - case oci.SignedImage: - return AttachAttestationToImage(obj, att, opts...) - case oci.SignedImageIndex: - return AttachAttestationToImageIndex(obj, att, opts...) - default: - return AttachAttestationToUnknown(obj, att, opts...) - } -} - -// AttachFileToEntity attaches the provided file to the provided entity. -func AttachFileToEntity(se oci.SignedEntity, name string, f oci.File, opts ...SignOption) (oci.SignedEntity, error) { - switch obj := se.(type) { - case oci.SignedImage: - return AttachFileToImage(obj, name, f, opts...) - case oci.SignedImageIndex: - return AttachFileToImageIndex(obj, name, f, opts...) - default: - return AttachFileToUnknown(obj, name, f, opts...) - } -} - -// AttachSignatureToImage attaches the provided signature to the provided image. -func AttachSignatureToImage(si oci.SignedImage, sig oci.Signature, opts ...SignOption) (oci.SignedImage, error) { - return &signedImage{ - SignedImage: si, - sig: sig, - attachments: make(map[string]oci.File), - so: makeSignOpts(opts...), - }, nil -} - -// AttachAttestationToImage attaches the provided attestation to the provided image. 
-func AttachAttestationToImage(si oci.SignedImage, att oci.Signature, opts ...SignOption) (oci.SignedImage, error) { - return &signedImage{ - SignedImage: si, - att: att, - attachments: make(map[string]oci.File), - so: makeSignOpts(opts...), - }, nil -} - -// AttachFileToImage attaches the provided file to the provided image. -func AttachFileToImage(si oci.SignedImage, name string, f oci.File, opts ...SignOption) (oci.SignedImage, error) { - return &signedImage{ - SignedImage: si, - attachments: map[string]oci.File{ - name: f, - }, - so: makeSignOpts(opts...), - }, nil -} - -type signedImage struct { - oci.SignedImage - sig oci.Signature - att oci.Signature - so *signOpts - attachments map[string]oci.File -} - -// Signatures implements oci.SignedImage -func (si *signedImage) Signatures() (oci.Signatures, error) { - return si.so.dedupeAndReplace(si.sig, si.SignedImage.Signatures) -} - -// Attestations implements oci.SignedImage -func (si *signedImage) Attestations() (oci.Signatures, error) { - return si.so.dedupeAndReplace(si.att, si.SignedImage.Attestations) -} - -// Attachment implements oci.SignedImage -func (si *signedImage) Attachment(attName string) (oci.File, error) { - if f, ok := si.attachments[attName]; ok { - return f, nil - } - return nil, fmt.Errorf("attachment %q not found", attName) -} - -// AttachSignatureToImageIndex attaches the provided signature to the provided image index. -func AttachSignatureToImageIndex(sii oci.SignedImageIndex, sig oci.Signature, opts ...SignOption) (oci.SignedImageIndex, error) { - return &signedImageIndex{ - ociSignedImageIndex: sii, - sig: sig, - attachments: make(map[string]oci.File), - so: makeSignOpts(opts...), - }, nil -} - -// AttachAttestationToImageIndex attaches the provided attestation to the provided image index. -func AttachAttestationToImageIndex(sii oci.SignedImageIndex, att oci.Signature, opts ...SignOption) (oci.SignedImageIndex, error) { - return &signedImageIndex{ - ociSignedImageIndex: sii, - att: att, - attachments: make(map[string]oci.File), - so: makeSignOpts(opts...), - }, nil -} - -// AttachFileToImageIndex attaches the provided file to the provided image index. -func AttachFileToImageIndex(sii oci.SignedImageIndex, name string, f oci.File, opts ...SignOption) (oci.SignedImageIndex, error) { - return &signedImageIndex{ - ociSignedImageIndex: sii, - attachments: map[string]oci.File{ - name: f, - }, - so: makeSignOpts(opts...), - }, nil -} - -type ociSignedImageIndex oci.SignedImageIndex - -type signedImageIndex struct { - ociSignedImageIndex - sig oci.Signature - att oci.Signature - so *signOpts - attachments map[string]oci.File -} - -// Signatures implements oci.SignedImageIndex -func (sii *signedImageIndex) Signatures() (oci.Signatures, error) { - return sii.so.dedupeAndReplace(sii.sig, sii.ociSignedImageIndex.Signatures) -} - -// Attestations implements oci.SignedImageIndex -func (sii *signedImageIndex) Attestations() (oci.Signatures, error) { - return sii.so.dedupeAndReplace(sii.att, sii.ociSignedImageIndex.Attestations) -} - -// Attachment implements oci.SignedImageIndex -func (sii *signedImageIndex) Attachment(attName string) (oci.File, error) { - if f, ok := sii.attachments[attName]; ok { - return f, nil - } - return nil, fmt.Errorf("attachment %q not found", attName) -} - -// AttachSignatureToUnknown attaches the provided signature to the provided image. 
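[Reviewer note, illustrative only.] The Attach* helpers above layer a signature onto an entity without touching the underlying image. A sketch pairing them with static.NewSignature (from pkg/oci/static, imported elsewhere in this diff but not shown here):

    package example

    import (
        "github.com/sigstore/cosign/v2/pkg/oci"
        ocimutate "github.com/sigstore/cosign/v2/pkg/oci/mutate"
        "github.com/sigstore/cosign/v2/pkg/oci/static"
    )

    // attach appends one signature layer to an already-signed image.
    // payload is the simple-signing payload; b64sig its base64 signature.
    func attach(si oci.SignedImage, payload []byte, b64sig string) (oci.SignedImage, error) {
        sig, err := static.NewSignature(payload, b64sig)
        if err != nil {
            return nil, err
        }
        return ocimutate.AttachSignatureToImage(si, sig)
    }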
-func AttachSignatureToUnknown(se oci.SignedEntity, sig oci.Signature, opts ...SignOption) (oci.SignedEntity, error) { - return &signedUnknown{ - SignedEntity: se, - sig: sig, - attachments: make(map[string]oci.File), - so: makeSignOpts(opts...), - }, nil -} - -// AttachAttestationToUnknown attaches the provided attestation to the provided image. -func AttachAttestationToUnknown(se oci.SignedEntity, att oci.Signature, opts ...SignOption) (oci.SignedEntity, error) { - return &signedUnknown{ - SignedEntity: se, - att: att, - attachments: make(map[string]oci.File), - so: makeSignOpts(opts...), - }, nil -} - -// AttachFileToUnknown attaches the provided file to the provided image. -func AttachFileToUnknown(se oci.SignedEntity, name string, f oci.File, opts ...SignOption) (oci.SignedEntity, error) { - return &signedUnknown{ - SignedEntity: se, - attachments: map[string]oci.File{ - name: f, - }, - so: makeSignOpts(opts...), - }, nil -} - -type signedUnknown struct { - oci.SignedEntity - sig oci.Signature - att oci.Signature - so *signOpts - attachments map[string]oci.File -} - -type digestable interface { - Digest() (v1.Hash, error) -} - -// Digest is generally implemented by oci.SignedEntity implementations. -func (si *signedUnknown) Digest() (v1.Hash, error) { - d, ok := si.SignedEntity.(digestable) - if !ok { - return v1.Hash{}, fmt.Errorf("underlying signed entity not digestable: %T", si.SignedEntity) - } - return d.Digest() -} - -// Signatures implements oci.SignedEntity -func (si *signedUnknown) Signatures() (oci.Signatures, error) { - return si.so.dedupeAndReplace(si.sig, si.SignedEntity.Signatures) -} - -// Attestations implements oci.SignedEntity -func (si *signedUnknown) Attestations() (oci.Signatures, error) { - return si.so.dedupeAndReplace(si.att, si.SignedEntity.Attestations) -} - -// Attachment implements oci.SignedEntity -func (si *signedUnknown) Attachment(attName string) (oci.File, error) { - if f, ok := si.attachments[attName]; ok { - return f, nil - } - return nil, fmt.Errorf("attachment %q not found", attName) -} - -func (so *signOpts) dedupeAndReplace(sig oci.Signature, basefn func() (oci.Signatures, error)) (oci.Signatures, error) { - base, err := basefn() - if err != nil { - return nil, err - } else if sig == nil { - return base, nil - } - if so.dd != nil { - if existing, err := so.dd.Find(base, sig); err != nil { - return nil, err - } else if existing != nil { - // Just return base if the signature is redundant - return base, nil - } - } - if so.ro != nil { - replace, err := so.ro.Replace(base, sig) - if err != nil { - return nil, err - } - return ReplaceSignatures(replace) - } - return AppendSignatures(base, sig) -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/options.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/options.go deleted file mode 100644 index a4c41b6fa9..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/options.go +++ /dev/null @@ -1,115 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package mutate - -import ( - "github.com/google/go-containerregistry/pkg/v1/types" - "github.com/sigstore/cosign/v2/pkg/cosign/bundle" - "github.com/sigstore/cosign/v2/pkg/oci" -) - -// DupeDetector scans a list of signatures looking for a duplicate. -type DupeDetector interface { - Find(oci.Signatures, oci.Signature) (oci.Signature, error) -} - -type ReplaceOp interface { - Replace(oci.Signatures, oci.Signature) (oci.Signatures, error) -} - -type SignOption func(*signOpts) - -type signOpts struct { - dd DupeDetector - ro ReplaceOp -} - -func makeSignOpts(opts ...SignOption) *signOpts { - so := &signOpts{} - for _, opt := range opts { - opt(so) - } - return so -} - -// WithDupeDetector configures Sign* to use the following DupeDetector -// to avoid attaching duplicate signatures. -func WithDupeDetector(dd DupeDetector) SignOption { - return func(so *signOpts) { - so.dd = dd - } -} - -func WithReplaceOp(ro ReplaceOp) SignOption { - return func(so *signOpts) { - so.ro = ro - } -} - -type signatureOpts struct { - annotations map[string]string - bundle *bundle.RekorBundle - rfc3161Timestamp *bundle.RFC3161Timestamp - cert []byte - chain []byte - mediaType types.MediaType -} - -type SignatureOption func(*signatureOpts) - -// WithAnnotations specifies the annotations the Signature should have. -func WithAnnotations(annotations map[string]string) SignatureOption { - return func(so *signatureOpts) { - so.annotations = annotations - } -} - -// WithBundle specifies the new Bundle the Signature should have. -func WithBundle(b *bundle.RekorBundle) SignatureOption { - return func(so *signatureOpts) { - so.bundle = b - } -} - -// WithRFC3161Timestamp specifies the new RFC3161Timestamp the Signature should have. -func WithRFC3161Timestamp(b *bundle.RFC3161Timestamp) SignatureOption { - return func(so *signatureOpts) { - so.rfc3161Timestamp = b - } -} - -// WithCertChain specifies the new cert and chain the Signature should have. -func WithCertChain(cert, chain []byte) SignatureOption { - return func(so *signatureOpts) { - so.cert = cert - so.chain = chain - } -} - -// WithMediaType specifies the new MediaType the Signature should have. -func WithMediaType(mediaType types.MediaType) SignatureOption { - return func(so *signatureOpts) { - so.mediaType = mediaType - } -} - -func makeSignatureOption(opts ...SignatureOption) *signatureOpts { - so := &signatureOpts{} - for _, opt := range opts { - opt(so) - } - return so -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signature.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signature.go deleted file mode 100644 index f9b36a03ab..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signature.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
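[Reviewer note, illustrative only.] DupeDetector above is the hook dedupeAndReplace consults before appending: if Find returns a non-nil signature, the new one is dropped as redundant. A sketch of a hypothetical payload-equality detector:

    package example

    import (
        "bytes"

        "github.com/sigstore/cosign/v2/pkg/oci"
    )

    // byPayload treats two signatures with identical payloads as duplicates,
    // making repeated signing of the same claims a no-op.
    type byPayload struct{}

    func (byPayload) Find(sigs oci.Signatures, sig oci.Signature) (oci.Signature, error) {
        want, err := sig.Payload()
        if err != nil {
            return nil, err
        }
        existing, err := sigs.Get()
        if err != nil {
            return nil, err
        }
        for _, s := range existing {
            got, err := s.Payload()
            if err != nil {
                return nil, err
            }
            if bytes.Equal(got, want) {
                return s, nil // duplicate: dedupeAndReplace keeps the base set
            }
        }
        return nil, nil // no duplicate found
    }

    // Wire it in via:
    //   ocimutate.AttachSignatureToImage(si, sig, ocimutate.WithDupeDetector(byPayload{}))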
- -package mutate - -import ( - "bytes" - "crypto/x509" - "encoding/json" - "fmt" - "io" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" - "github.com/sigstore/cosign/v2/pkg/cosign/bundle" - "github.com/sigstore/cosign/v2/pkg/oci" - "github.com/sigstore/cosign/v2/pkg/oci/static" - "github.com/sigstore/sigstore/pkg/cryptoutils" -) - -type sigWrapper struct { - wrapped oci.Signature - - annotations map[string]string - bundle *bundle.RekorBundle - rfc3161Timestamp *bundle.RFC3161Timestamp - cert *x509.Certificate - chain []*x509.Certificate - mediaType types.MediaType -} - -var _ v1.Layer = (*sigWrapper)(nil) -var _ oci.Signature = (*sigWrapper)(nil) - -func copyAnnotations(ann map[string]string) map[string]string { - new := make(map[string]string, len(ann)) //nolint: revive - for k, v := range ann { - new[k] = v - } - return new -} - -// Annotations implements oci.Signature. -func (sw *sigWrapper) Annotations() (map[string]string, error) { - if sw.annotations != nil { - return copyAnnotations(sw.annotations), nil - } - return sw.wrapped.Annotations() -} - -// Payload implements oci.Signature. -func (sw *sigWrapper) Payload() ([]byte, error) { - return sw.wrapped.Payload() -} - -// Signature implements oci.Signature -func (sw *sigWrapper) Signature() ([]byte, error) { - return sw.wrapped.Signature() -} - -// Base64Signature implements oci.Signature. -func (sw *sigWrapper) Base64Signature() (string, error) { - return sw.wrapped.Base64Signature() -} - -// Cert implements oci.Signature. -func (sw *sigWrapper) Cert() (*x509.Certificate, error) { - if sw.cert != nil { - return sw.cert, nil - } - return sw.wrapped.Cert() -} - -// Chain implements oci.Signature. -func (sw *sigWrapper) Chain() ([]*x509.Certificate, error) { - if sw.chain != nil { - return sw.chain, nil - } - return sw.wrapped.Chain() -} - -// Bundle implements oci.Signature. -func (sw *sigWrapper) Bundle() (*bundle.RekorBundle, error) { - if sw.bundle != nil { - return sw.bundle, nil - } - return sw.wrapped.Bundle() -} - -// RFC3161Timestamp implements oci.Signature. -func (sw *sigWrapper) RFC3161Timestamp() (*bundle.RFC3161Timestamp, error) { - if sw.rfc3161Timestamp != nil { - return sw.rfc3161Timestamp, nil - } - return sw.wrapped.RFC3161Timestamp() -} - -// MediaType implements v1.Layer -func (sw *sigWrapper) MediaType() (types.MediaType, error) { - if sw.mediaType != "" { - return sw.mediaType, nil - } - return sw.wrapped.MediaType() -} - -// Digest implements v1.Layer -func (sw *sigWrapper) Digest() (v1.Hash, error) { - return sw.wrapped.Digest() -} - -// DiffID implements v1.Layer -func (sw *sigWrapper) DiffID() (v1.Hash, error) { - return sw.wrapped.DiffID() -} - -// Compressed implements v1.Layer -func (sw *sigWrapper) Compressed() (io.ReadCloser, error) { - return sw.wrapped.Compressed() -} - -// Uncompressed implements v1.Layer -func (sw *sigWrapper) Uncompressed() (io.ReadCloser, error) { - return sw.wrapped.Uncompressed() -} - -// Size implements v1.Layer -func (sw *sigWrapper) Size() (int64, error) { - return sw.wrapped.Size() -} - -// Signature returns a new oci.Signature based on the provided original, plus the requested mutations. -func Signature(original oci.Signature, opts ...SignatureOption) (oci.Signature, error) { - newSig := sigWrapper{wrapped: original} - - so := makeSignatureOption(opts...) 
- oldAnn, err := original.Annotations() - if err != nil { - return nil, fmt.Errorf("could not get annotations from signature to mutate: %w", err) - } - - var newAnn map[string]string - if so.annotations != nil { - newAnn = copyAnnotations(so.annotations) - newAnn[static.SignatureAnnotationKey] = oldAnn[static.SignatureAnnotationKey] - for _, key := range []string{static.BundleAnnotationKey, static.CertificateAnnotationKey, static.ChainAnnotationKey, static.RFC3161TimestampAnnotationKey} { - if val, isSet := oldAnn[key]; isSet { - newAnn[key] = val - } else { - delete(newAnn, key) - } - } - } else { - newAnn = copyAnnotations(oldAnn) - } - - if so.bundle != nil { - newSig.bundle = so.bundle - b, err := json.Marshal(so.bundle) - if err != nil { - return nil, err - } - newAnn[static.BundleAnnotationKey] = string(b) - } - - if so.rfc3161Timestamp != nil { - newSig.rfc3161Timestamp = so.rfc3161Timestamp - b, err := json.Marshal(so.rfc3161Timestamp) - if err != nil { - return nil, err - } - newAnn[static.RFC3161TimestampAnnotationKey] = string(b) - } - - if so.cert != nil { - var cert *x509.Certificate - var chain []*x509.Certificate - - certs, err := cryptoutils.LoadCertificatesFromPEM(bytes.NewReader(so.cert)) - if err != nil { - return nil, err - } - newAnn[static.CertificateAnnotationKey] = string(so.cert) - cert = certs[0] - - delete(newAnn, static.ChainAnnotationKey) - if so.chain != nil { - chain, err = cryptoutils.LoadCertificatesFromPEM(bytes.NewReader(so.chain)) - if err != nil { - return nil, err - } - newAnn[static.ChainAnnotationKey] = string(so.chain) - } - - newSig.cert = cert - newSig.chain = chain - } - - if so.mediaType != "" { - newSig.mediaType = so.mediaType - } - - newSig.annotations = newAnn - - return &newSig, nil -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signatures.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signatures.go deleted file mode 100644 index 648cab0414..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signatures.go +++ /dev/null @@ -1,96 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mutate - -import ( - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/empty" - "github.com/google/go-containerregistry/pkg/v1/mutate" - "github.com/sigstore/cosign/v2/pkg/oci" -) - -// AppendSignatures produces a new oci.Signatures with the provided signatures -// appended to the provided base signatures. -func AppendSignatures(base oci.Signatures, sigs ...oci.Signature) (oci.Signatures, error) { - adds := make([]mutate.Addendum, 0, len(sigs)) - for _, sig := range sigs { - ann, err := sig.Annotations() - if err != nil { - return nil, err - } - adds = append(adds, mutate.Addendum{ - Layer: sig, - Annotations: ann, - }) - } - - img, err := mutate.Append(base, adds...) 
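[Reviewer note, illustrative only.] Signature above copies the original layer and overlays only the requested mutations, preserving the reserved cosign annotation keys when annotations are replaced. A sketch; the annotation key and media-type string are hypothetical/assumed values:

    package example

    import (
        "github.com/google/go-containerregistry/pkg/v1/types"
        "github.com/sigstore/cosign/v2/pkg/oci"
        ocimutate "github.com/sigstore/cosign/v2/pkg/oci/mutate"
    )

    // relabel returns a copy of sig with fresh annotations and an explicit
    // media type; signature/bundle/cert/chain keys are carried over above.
    func relabel(sig oci.Signature) (oci.Signature, error) {
        return ocimutate.Signature(sig,
            ocimutate.WithAnnotations(map[string]string{"org.example/reviewed": "true"}),
            ocimutate.WithMediaType(types.MediaType("application/vnd.dev.cosign.simplesigning.v1+json")),
        )
    }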
- if err != nil { - return nil, err - } - - return &sigAppender{ - Image: img, - base: base, - sigs: sigs, - }, nil -} - -// ReplaceSignatures produces a new oci.Signatures provided by the base signatures -// replaced with the new oci.Signatures. -func ReplaceSignatures(base oci.Signatures) (oci.Signatures, error) { - sigs, err := base.Get() - if err != nil { - return nil, err - } - adds := make([]mutate.Addendum, 0, len(sigs)) - for _, sig := range sigs { - ann, err := sig.Annotations() - if err != nil { - return nil, err - } - adds = append(adds, mutate.Addendum{ - Layer: sig, - Annotations: ann, - }) - } - img, err := mutate.Append(empty.Image, adds...) - if err != nil { - return nil, err - } - return &sigAppender{ - Image: img, - base: base, - sigs: []oci.Signature{}, - }, nil -} - -type sigAppender struct { - v1.Image - base oci.Signatures - sigs []oci.Signature -} - -var _ oci.Signatures = (*sigAppender)(nil) - -// Get implements oci.Signatures -func (sa *sigAppender) Get() ([]oci.Signature, error) { - sl, err := sa.base.Get() - if err != nil { - return nil, err - } - return append(sl, sa.sigs...), nil -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/digest.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/digest.go deleted file mode 100644 index 204f2ba675..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/digest.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2021 The Sigstore Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "github.com/google/go-containerregistry/pkg/name" -) - -// ResolveDigest returns the digest of the image at the reference. -// -// If the reference is by digest already, it simply extracts the digest. -// Otherwise, it looks up the digest from the registry. -func ResolveDigest(ref name.Reference, opts ...Option) (name.Digest, error) { - o := makeOptions(ref.Context(), opts...) - if d, ok := ref.(name.Digest); ok { - return d, nil - } - desc, err := remoteGet(ref, o.ROpt...) - if err != nil { - return name.Digest{}, err - } - return ref.Context().Digest(desc.Digest.String()), nil -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/image.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/image.go deleted file mode 100644 index 5879833941..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/image.go +++ /dev/null @@ -1,66 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
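[Reviewer note, illustrative only.] ResolveDigest above is the standard tag-to-digest pinning helper; by-digest references short-circuit without a registry round-trip. A sketch of a hypothetical wrapper:

    package example

    import (
        "github.com/google/go-containerregistry/pkg/name"
        ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
    )

    // pin converts a possibly-moving tag into an immutable digest reference.
    func pin(image string) (name.Digest, error) {
        ref, err := name.ParseReference(image)
        if err != nil {
            return name.Digest{}, err
        }
        return ociremote.ResolveDigest(ref)
    }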
-// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "errors" - "net/http" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/sigstore/cosign/v2/pkg/oci" -) - -var ErrImageNotFound = errors.New("image not found in registry") - -// SignedImage provides access to a remote image reference, and its signatures. -func SignedImage(ref name.Reference, options ...Option) (oci.SignedImage, error) { - o := makeOptions(ref.Context(), options...) - ri, err := remoteImage(ref, o.ROpt...) - var te *transport.Error - if errors.As(err, &te) && te.StatusCode == http.StatusNotFound { - return nil, ErrImageNotFound - } else if err != nil { - return nil, err - } - return &image{ - Image: ri, - opt: o, - }, nil -} - -type image struct { - v1.Image - opt *options -} - -var _ oci.SignedImage = (*image)(nil) - -// Signatures implements oci.SignedImage -func (i *image) Signatures() (oci.Signatures, error) { - return signatures(i, i.opt) -} - -// Attestations implements oci.SignedImage -func (i *image) Attestations() (oci.Signatures, error) { - return attestations(i, i.opt) -} - -// Attestations implements oci.SignedImage -func (i *image) Attachment(name string) (oci.File, error) { - return attachment(i, name, i.opt) -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/index.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/index.go deleted file mode 100644 index 6269e9bfaa..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/index.go +++ /dev/null @@ -1,94 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "errors" - "net/http" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/sigstore/cosign/v2/pkg/oci" -) - -// SignedImageIndex provides access to a remote index reference, and its signatures. -func SignedImageIndex(ref name.Reference, options ...Option) (oci.SignedImageIndex, error) { - o := makeOptions(ref.Context(), options...) - ri, err := remoteIndex(ref, o.ROpt...) - var te *transport.Error - if errors.As(err, &te) && te.StatusCode == http.StatusNotFound { - return nil, errors.New("index not found in registry") - } else if err != nil { - return nil, err - } - return &index{ - v1Index: ri, - ref: ref, - opt: o, - }, nil -} - -// We alias ImageIndex so that we can inline it without the type -// name colliding with the name of a method it had to implement. 
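[Reviewer note, illustrative only.] SignedImage above maps a registry 404 onto the sentinel ErrImageNotFound, so callers can branch with errors.Is rather than parsing transport errors. A sketch:

    package example

    import (
        "errors"
        "fmt"

        "github.com/google/go-containerregistry/pkg/name"
        "github.com/sigstore/cosign/v2/pkg/oci"
        ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
    )

    func fetch(ref name.Reference) (oci.SignedImage, error) {
        img, err := ociremote.SignedImage(ref)
        if errors.Is(err, ociremote.ErrImageNotFound) {
            return nil, fmt.Errorf("%s does not exist: %w", ref, err)
        }
        return img, err
    }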
-type v1Index v1.ImageIndex - -type index struct { - v1Index - ref name.Reference - opt *options -} - -var _ oci.SignedImageIndex = (*index)(nil) - -// Signatures implements oci.SignedImageIndex -func (i *index) Signatures() (oci.Signatures, error) { - return signatures(i, i.opt) -} - -// Attestations implements oci.SignedImageIndex -func (i *index) Attestations() (oci.Signatures, error) { - return attestations(i, i.opt) -} - -// Attestations implements oci.SignedImage -func (i *index) Attachment(name string) (oci.File, error) { - return attachment(i, name, i.opt) -} - -// SignedImage implements oci.SignedImageIndex -func (i *index) SignedImage(h v1.Hash) (oci.SignedImage, error) { - img, err := i.Image(h) - if err != nil { - return nil, err - } - return &image{ - Image: img, - opt: i.opt, - }, nil -} - -// SignedImageIndex implements oci.SignedImageIndex -func (i *index) SignedImageIndex(h v1.Hash) (oci.SignedImageIndex, error) { - ii, err := i.ImageIndex(h) - if err != nil { - return nil, err - } - return &index{ - v1Index: ii, - opt: i.opt, - }, nil -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go deleted file mode 100644 index 0a7f23842b..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go +++ /dev/null @@ -1,144 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "fmt" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/sigstore/cosign/v2/pkg/cosign/env" -) - -const ( - SignatureTagSuffix = "sig" - SBOMTagSuffix = "sbom" - AttestationTagSuffix = "att" - CustomTagPrefix = "" - - RepoOverrideEnvKey = "COSIGN_REPOSITORY" -) - -// Option is a functional option for remote operations. -type Option func(*options) - -type options struct { - SignatureSuffix string - AttestationSuffix string - SBOMSuffix string - TagPrefix string - TargetRepository name.Repository - ROpt []remote.Option - NameOpts []name.Option - OriginalOptions []Option -} - -var defaultOptions = []remote.Option{ - remote.WithAuthFromKeychain(authn.DefaultKeychain), - // TODO(mattmoor): Incorporate user agent. -} - -func makeOptions(target name.Repository, opts ...Option) *options { - o := &options{ - SignatureSuffix: SignatureTagSuffix, - AttestationSuffix: AttestationTagSuffix, - SBOMSuffix: SBOMTagSuffix, - TagPrefix: CustomTagPrefix, - TargetRepository: target, - ROpt: defaultOptions, - - // Keep the original options around for things that want - // to call something that takes options! - OriginalOptions: opts, - } - - for _, option := range opts { - option(o) - } - - return o -} - -// WithPrefix is a functional option for overriding the default -// tag prefix. 
-func WithPrefix(prefix string) Option { - return func(o *options) { - o.TagPrefix = prefix - } -} - -// WithSignatureSuffix is a functional option for overriding the default -// signature tag suffix. -func WithSignatureSuffix(suffix string) Option { - return func(o *options) { - o.SignatureSuffix = suffix - } -} - -// WithAttestationSuffix is a functional option for overriding the default -// attestation tag suffix. -func WithAttestationSuffix(suffix string) Option { - return func(o *options) { - o.AttestationSuffix = suffix - } -} - -// WithSBOMSuffix is a functional option for overriding the default -// SBOM tag suffix. -func WithSBOMSuffix(suffix string) Option { - return func(o *options) { - o.SBOMSuffix = suffix - } -} - -// WithRemoteOptions is a functional option for overriding the default -// remote options passed to GGCR. -func WithRemoteOptions(opts ...remote.Option) Option { - return func(o *options) { - o.ROpt = opts - } -} - -// WithTargetRepository is a functional option for overriding the default -// target repository hosting the signature and attestation tags. -func WithTargetRepository(repo name.Repository) Option { - return func(o *options) { - o.TargetRepository = repo - } -} - -// GetEnvTargetRepository returns the Repository specified by -// `os.Getenv(RepoOverrideEnvKey)`, or the empty value if not set. -// Returns an error if the value is set but cannot be parsed. -func GetEnvTargetRepository() (name.Repository, error) { - if ro := env.Getenv(env.VariableRepository); ro != "" { - repo, err := name.NewRepository(ro) - if err != nil { - return name.Repository{}, fmt.Errorf("parsing $"+RepoOverrideEnvKey+": %w", err) - } - return repo, nil - } - return name.Repository{}, nil -} - -// WithNameOptions is a functional option for overriding the default -// name options passed to GGCR. -func WithNameOptions(opts ...name.Option) Option { - return func(o *options) { - o.NameOpts = opts - } -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/referrers.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/referrers.go deleted file mode 100644 index 24cb802eb4..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/referrers.go +++ /dev/null @@ -1,34 +0,0 @@ -// -// Copyright 2023 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote" -) - -// Referrers fetches references using registry options. -func Referrers(d name.Digest, artifactType string, opts ...Option) (*v1.IndexManifest, error) { - o := makeOptions(name.Repository{}, opts...) - rOpt := o.ROpt - rOpt = append(rOpt, remote.WithFilter("artifactType", artifactType)) - idx, err := remote.Referrers(d, rOpt...) 
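The functional options above are applied in order inside `makeOptions`, so a later option overrides an earlier one. A sketch of composing them (the repository names are hypothetical; `remote.WithAuthFromKeychain` is the same ggcr option the package installs by default):

```go
package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
)

func main() {
	// A digest reference (sha256 of "hello world") so no registry lookup is needed.
	ref, err := name.ParseReference("example.com/app@sha256:b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9")
	if err != nil {
		log.Fatal(err)
	}
	// Hypothetical side repository, mirroring what a COSIGN_REPOSITORY
	// override via GetEnvTargetRepository would supply.
	sigRepo, err := name.NewRepository("example.com/signatures")
	if err != nil {
		log.Fatal(err)
	}

	opts := []ociremote.Option{
		ociremote.WithTargetRepository(sigRepo),
		ociremote.WithSignatureSuffix(ociremote.SignatureTagSuffix), // "sig": the default, shown explicitly
		ociremote.WithRemoteOptions(remote.WithAuthFromKeychain(authn.DefaultKeychain)),
	}

	tag, err := ociremote.SignatureTag(ref, opts...)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("signatures for %s live at %s", ref, tag)
}
```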
- if err != nil { - return nil, err - } - return idx.IndexManifest() -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/remote.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/remote.go deleted file mode 100644 index 7827407ce3..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/remote.go +++ /dev/null @@ -1,282 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "errors" - "fmt" - "io" - "net/http" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/types" - ociexperimental "github.com/sigstore/cosign/v2/internal/pkg/oci/remote" - "github.com/sigstore/cosign/v2/pkg/oci" -) - -// These enable mocking for unit testing without faking an entire registry. -var ( - remoteImage = remote.Image - remoteIndex = remote.Index - remoteGet = remote.Get - remoteWrite = remote.Write -) - -// EntityNotFoundError is the error that SignedEntity returns when the -// provided ref does not exist. -type EntityNotFoundError struct { - baseErr error -} - -func (e *EntityNotFoundError) Error() string { - return fmt.Sprintf("entity not found in registry, error: %v", e.baseErr) -} - -func NewEntityNotFoundError(err error) error { - return &EntityNotFoundError{ - baseErr: err, - } -} - -// SignedEntity provides access to a remote reference, and its signatures. -// The SignedEntity will be one of SignedImage or SignedImageIndex. -func SignedEntity(ref name.Reference, options ...Option) (oci.SignedEntity, error) { - o := makeOptions(ref.Context(), options...) - - got, err := remoteGet(ref, o.ROpt...) 
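`SignedEntity` (below) is the polymorphic entry point, and `EntityNotFoundError` is how a missing manifest surfaces to callers. A minimal sketch, with a hypothetical reference:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/sigstore/cosign/v2/pkg/oci"
	ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
)

func main() {
	ref, err := name.ParseReference("example.com/app:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}

	se, err := ociremote.SignedEntity(ref)
	var enf *ociremote.EntityNotFoundError
	if errors.As(err, &enf) {
		log.Fatalf("nothing published at %s: %v", ref, enf)
	} else if err != nil {
		log.Fatal(err)
	}

	// The concrete type depends on the manifest's media type.
	switch se.(type) {
	case oci.SignedImageIndex:
		fmt.Println("multi-arch index")
	case oci.SignedImage:
		fmt.Println("single image")
	}
}
```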
- var te *transport.Error - if errors.As(err, &te) && te.StatusCode == http.StatusNotFound { - return nil, NewEntityNotFoundError(err) - } else if err != nil { - return nil, err - } - - switch got.MediaType { - case types.OCIImageIndex, types.DockerManifestList: - ii, err := got.ImageIndex() - if err != nil { - return nil, err - } - return &index{ - v1Index: ii, - ref: ref.Context().Digest(got.Digest.String()), - opt: o, - }, nil - - case types.OCIManifestSchema1, types.DockerManifestSchema2: - i, err := got.Image() - if err != nil { - return nil, err - } - return &image{ - Image: i, - opt: o, - }, nil - - default: - return nil, fmt.Errorf("unknown mime type: %v", got.MediaType) - } -} - -// normalize turns image digests into tags with optional prefix & suffix: -// sha256:d34db33f -> [prefix]sha256-d34db33f[.suffix] -func normalize(h v1.Hash, prefix string, suffix string) string { - return normalizeWithSeparator(h, prefix, suffix, "-") -} - -// normalizeWithSeparator turns image digests into tags with optional prefix & suffix: -// sha256:d34db33f -> [prefix]sha256[algorithmSeparator]d34db33f[.suffix] -func normalizeWithSeparator(h v1.Hash, prefix string, suffix string, algorithmSeparator string) string { - if suffix == "" { - return fmt.Sprint(prefix, h.Algorithm, algorithmSeparator, h.Hex) - } - return fmt.Sprint(prefix, h.Algorithm, algorithmSeparator, h.Hex, ".", suffix) -} - -// SignatureTag returns the name.Tag that associates signatures with a particular digest. -func SignatureTag(ref name.Reference, opts ...Option) (name.Tag, error) { - o := makeOptions(ref.Context(), opts...) - return suffixTag(ref, o.SignatureSuffix, "-", o) -} - -// AttestationTag returns the name.Tag that associates attestations with a particular digest. -func AttestationTag(ref name.Reference, opts ...Option) (name.Tag, error) { - o := makeOptions(ref.Context(), opts...) - return suffixTag(ref, o.AttestationSuffix, "-", o) -} - -// SBOMTag returns the name.Tag that associates SBOMs with a particular digest. -func SBOMTag(ref name.Reference, opts ...Option) (name.Tag, error) { - o := makeOptions(ref.Context(), opts...) - return suffixTag(ref, o.SBOMSuffix, "-", o) -} - -// DigestTag returns the name.Tag that corresponds to a particular digest. -func DigestTag(ref name.Reference, opts ...Option) (name.Tag, error) { - o := makeOptions(ref.Context(), opts...) - return suffixTag(ref, "", ":", o) -} - -// DockerContentDigest fetches the Docker-Content-Digest header for the referenced tag, -// which is required to delete the object in registry API v2.3 and greater. -// See https://github.com/distribution/distribution/blob/main/docs/content/spec/api.md#deleting-an-image -// and https://github.com/distribution/distribution/issues/1579 -func DockerContentDigest(ref name.Tag, opts ...Option) (name.Tag, error) { - o := makeOptions(ref.Context(), opts...) - desc, err := remoteGet(ref, o.ROpt...) - if err != nil { - return name.Tag{}, err - } - h := desc.Digest - return o.TargetRepository.Tag(normalizeWithSeparator(h, o.TagPrefix, "", ":")), nil -} - -func suffixTag(ref name.Reference, suffix string, algorithmSeparator string, o *options) (name.Tag, error) { - var h v1.Hash - if digest, ok := ref.(name.Digest); ok { - var err error - h, err = v1.NewHash(digest.DigestStr()) - if err != nil { // This is effectively impossible. - return name.Tag{}, err - } - } else { - desc, err := remoteGet(ref, o.ROpt...)
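The `normalize` helpers above define the whole tag-naming scheme: the `:` in a digest becomes `-` and the suffix is appended. A sketch that exercises it without any network traffic (the digest is sha256 of "hello world", used as a stand-in):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
)

func main() {
	d, err := name.NewDigest("example.com/app@sha256:b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9")
	if err != nil {
		log.Fatal(err)
	}

	// Because the reference already carries a digest, suffixTag never
	// calls remoteGet; the tag is derived purely from the hash.
	sigTag, err := ociremote.SignatureTag(d)
	if err != nil {
		log.Fatal(err)
	}
	attTag, err := ociremote.AttestationTag(d)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sigTag) // example.com/app:sha256-b94d...cde9.sig
	fmt.Println(attTag) // example.com/app:sha256-b94d...cde9.att
}
```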
- if err != nil { - return name.Tag{}, err - } - h = desc.Digest - } - return o.TargetRepository.Tag(normalizeWithSeparator(h, o.TagPrefix, suffix, algorithmSeparator)), nil -} - -// signatures is a shared implementation of the oci.Signed* Signatures method. -func signatures(digestable oci.SignedEntity, o *options) (oci.Signatures, error) { - h, err := digestable.Digest() - if err != nil { - return nil, err - } - return Signatures(o.TargetRepository.Tag(normalize(h, o.TagPrefix, o.SignatureSuffix)), o.OriginalOptions...) -} - -// attestations is a shared implementation of the oci.Signed* Attestations method. -func attestations(digestable oci.SignedEntity, o *options) (oci.Signatures, error) { - h, err := digestable.Digest() - if err != nil { - return nil, err - } - return Signatures(o.TargetRepository.Tag(normalize(h, o.TagPrefix, o.AttestationSuffix)), o.OriginalOptions...) -} - -// attachment is a shared implementation of the oci.Signed* Attachment method. -func attachment(digestable oci.SignedEntity, attName string, o *options) (oci.File, error) { - // Try using OCI 1.1 behavior - if file, err := attachmentExperimentalOCI(digestable, attName, o); err == nil { - return file, nil - } - - h, err := digestable.Digest() - if err != nil { - return nil, err - } - img, err := SignedImage(o.TargetRepository.Tag(normalize(h, o.TagPrefix, attName)), o.OriginalOptions...) - if err != nil { - return nil, err - } - ls, err := img.Layers() - if err != nil { - return nil, err - } - if len(ls) != 1 { - return nil, fmt.Errorf("expected exactly one layer in attachment, got %d", len(ls)) - } - - return &attached{ - SignedImage: img, - layer: ls[0], - }, nil -} - -type attached struct { - oci.SignedImage - layer v1.Layer -} - -var _ oci.File = (*attached)(nil) - -// FileMediaType implements oci.File -func (f *attached) FileMediaType() (types.MediaType, error) { - return f.layer.MediaType() -} - -// Payload implements oci.File -func (f *attached) Payload() ([]byte, error) { - // remote layers are believed to be stored - // compressed, but we don't compress attachments - // so use "Compressed" to access the raw byte - // stream. - rc, err := f.layer.Compressed() - if err != nil { - return nil, err - } - defer rc.Close() - return io.ReadAll(rc) -} - -// attachmentExperimentalOCI is a shared implementation of the oci.Signed* Attachment method (for OCI 1.1+ behavior). -func attachmentExperimentalOCI(digestable oci.SignedEntity, attName string, o *options) (oci.File, error) { - h, err := digestable.Digest() - if err != nil { - return nil, err - } - d := o.TargetRepository.Digest(h.String()) - - artifactType := ociexperimental.ArtifactType(attName) - index, err := Referrers(d, artifactType, o.OriginalOptions...) - if err != nil { - return nil, err - } - results := index.Manifests - - numResults := len(results) - if numResults == 0 { - return nil, fmt.Errorf("unable to locate reference with artifactType %s", artifactType) - } else if numResults > 1 { - // TODO: if there is more than 1 result.. what does that even mean? - // TODO: use ui.Warn - fmt.Printf("WARNING: there were a total of %d references with artifactType %s\n", numResults, artifactType) - } - // TODO: do this smarter using "created" annotations - lastResult := results[numResults-1] - - img, err := SignedImage(o.TargetRepository.Digest(lastResult.Digest.String()), o.OriginalOptions...) 
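The `attachment` helper above tries the OCI 1.1 referrers path first and silently falls back to the tag convention. A sketch of fetching an SBOM attachment through that logic (the reference is hypothetical; "sbom" is the attachment name matching the default suffix):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
)

func main() {
	ref, err := name.ParseReference("example.com/app:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	img, err := ociremote.SignedImage(ref)
	if err != nil {
		log.Fatal(err)
	}

	// Resolves via OCI 1.1 referrers when available, else <digest>.sbom tag.
	sbom, err := img.Attachment("sbom")
	if err != nil {
		log.Fatal(err)
	}
	mt, err := sbom.FileMediaType()
	if err != nil {
		log.Fatal(err)
	}
	payload, err := sbom.Payload()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("SBOM (%s), %d bytes\n", mt, len(payload))
}
```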
- if err != nil { - return nil, err - } - ls, err := img.Layers() - if err != nil { - return nil, err - } - if len(ls) != 1 { - return nil, fmt.Errorf("expected exactly one layer in attachment, got %d", len(ls)) - } - return &attached{ - SignedImage: img, - layer: ls[0], - }, nil -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/signatures.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/signatures.go deleted file mode 100644 index 635b5e9e07..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/signatures.go +++ /dev/null @@ -1,70 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "errors" - "net/http" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/sigstore/cosign/v2/pkg/oci" - "github.com/sigstore/cosign/v2/pkg/oci/empty" - "github.com/sigstore/cosign/v2/pkg/oci/internal/signature" -) - -// Signatures fetches the signatures image represented by the named reference. -// If the tag is not found, this returns an empty oci.Signatures. -func Signatures(ref name.Reference, opts ...Option) (oci.Signatures, error) { - o := makeOptions(ref.Context(), opts...) - img, err := remoteImage(ref, o.ROpt...) - var te *transport.Error - if errors.As(err, &te) { - if te.StatusCode != http.StatusNotFound { - return nil, te - } - return empty.Signatures(), nil - } else if err != nil { - return nil, err - } - return &sigs{ - Image: img, - }, nil -} - -type sigs struct { - v1.Image -} - -var _ oci.Signatures = (*sigs)(nil) - -// Get implements oci.Signatures -func (s *sigs) Get() ([]oci.Signature, error) { - m, err := s.Manifest() - if err != nil { - return nil, err - } - signatures := make([]oci.Signature, 0, len(m.Layers)) - for _, desc := range m.Layers { - layer, err := s.Image.LayerByDigest(desc.Digest) - if err != nil { - return nil, err - } - signatures = append(signatures, signature.New(layer, desc)) - } - return signatures, nil -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/unknown.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/unknown.go deleted file mode 100644 index 90a0fc9acb..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/unknown.go +++ /dev/null @@ -1,60 +0,0 @@ -// -// Copyright 2023 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
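`Signatures` (below) treats a missing tag as "no signatures yet" rather than an error, and `Get` turns each manifest layer into an `oci.Signature`. A sketch over a hypothetical signature tag of the form produced by `SignatureTag`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
)

func main() {
	tag, err := name.NewTag("example.com/app:sha256-b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9.sig")
	if err != nil {
		log.Fatal(err)
	}

	// A 404 here yields empty signatures, not an error.
	sigs, err := ociremote.Signatures(tag)
	if err != nil {
		log.Fatal(err)
	}
	ss, err := sigs.Get()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range ss {
		payload, _ := s.Payload()     // the signed simplesigning payload
		b64, _ := s.Base64Signature() // signature from the layer annotations
		fmt.Printf("payload %d bytes, sig %.12s...\n", len(payload), b64)
	}
}
```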
-// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/sigstore/cosign/v2/pkg/oci" -) - -// SignedUnknown provides access to signed metadata without directly accessing -// the underlying entity. This can be used to access signature metadata for -// digests that have not been published (yet). -func SignedUnknown(digest name.Digest, options ...Option) oci.SignedEntity { - o := makeOptions(digest.Context(), options...) - return &unknown{ - digest: digest, - opt: o, - } -} - -type unknown struct { - digest name.Digest - opt *options -} - -var _ oci.SignedEntity = (*unknown)(nil) - -// Digest implements digestable -func (i *unknown) Digest() (v1.Hash, error) { - return v1.NewHash(i.digest.DigestStr()) -} - -// Signatures implements oci.SignedEntity -func (i *unknown) Signatures() (oci.Signatures, error) { - return signatures(i, i.opt) -} - -// Attestations implements oci.SignedEntity -func (i *unknown) Attestations() (oci.Signatures, error) { - return attestations(i, i.opt) -} - -// Attachment implements oci.SignedEntity -func (i *unknown) Attachment(name string) (oci.File, error) { - return attachment(i, name, i.opt) -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/write.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/write.go deleted file mode 100644 index d0614768e8..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/write.go +++ /dev/null @@ -1,223 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "bytes" - "encoding/json" - "fmt" - "os" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/google/go-containerregistry/pkg/v1/static" - "github.com/google/go-containerregistry/pkg/v1/types" - ociexperimental "github.com/sigstore/cosign/v2/internal/pkg/oci/remote" - "github.com/sigstore/cosign/v2/pkg/oci" - ctypes "github.com/sigstore/cosign/v2/pkg/types" -) - -// WriteSignedImageIndexImages writes the images within the image index -// This includes the signed image and associated signatures in the image index -// TODO (priyawadhwa@): write the `index.json` itself to the repo as well -// TODO (priyawadhwa@): write the attestations -func WriteSignedImageIndexImages(ref name.Reference, sii oci.SignedImageIndex, opts ...Option) error { - repo := ref.Context() - o := makeOptions(repo, opts...) 
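`SignedUnknown` (below) is useful when only a digest is known, e.g. before the image itself is pushed; only the signature and attestation lookups touch the registry. A sketch with a hypothetical digest:

```go
package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
)

func main() {
	// A digest that may not be pushed anywhere yet (hypothetical).
	d, err := name.NewDigest("example.com/app@sha256:b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9")
	if err != nil {
		log.Fatal(err)
	}

	// No registry round-trip for the entity itself; Signatures() below
	// is what performs the network lookup.
	se := ociremote.SignedUnknown(d)
	sigs, err := se.Signatures()
	if err != nil {
		log.Fatal(err)
	}
	ss, err := sigs.Get()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d signature(s) already published for %s", len(ss), d)
}
```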
- - // write the image index if there is one - ii, err := sii.SignedImageIndex(v1.Hash{}) - if err != nil { - return fmt.Errorf("signed image index: %w", err) - } - if ii != nil { - if err := remote.WriteIndex(ref, ii, o.ROpt...); err != nil { - return fmt.Errorf("writing index: %w", err) - } - } - - // write the image if there is one - si, err := sii.SignedImage(v1.Hash{}) - if err != nil { - return fmt.Errorf("signed image: %w", err) - } - if si != nil { - if err := remoteWrite(ref, si, o.ROpt...); err != nil { - return fmt.Errorf("remote write: %w", err) - } - } - - // write the signatures - sigs, err := sii.Signatures() - if err != nil { - return err - } - if sigs != nil { // will be nil if there are no associated signatures - sigsTag, err := SignatureTag(ref, opts...) - if err != nil { - return fmt.Errorf("sigs tag: %w", err) - } - if err := remoteWrite(sigsTag, sigs, o.ROpt...); err != nil { - return err - } - } - - // write the attestations - atts, err := sii.Attestations() - if err != nil { - return err - } - if atts != nil { // will be nil if there are no associated attestations - attsTag, err := AttestationTag(ref, opts...) - if err != nil { - return fmt.Errorf("atts tag: %w", err) - } - return remoteWrite(attsTag, atts, o.ROpt...) - } - return nil -} - -// WriteSignatures publishes the signatures attached to the given entity -// into the provided repository. -func WriteSignatures(repo name.Repository, se oci.SignedEntity, opts ...Option) error { - o := makeOptions(repo, opts...) - - // Access the signature list to publish - sigs, err := se.Signatures() - if err != nil { - return err - } - - // Determine the tag to which these signatures should be published. - h, err := se.Digest() - if err != nil { - return err - } - tag := o.TargetRepository.Tag(normalize(h, o.TagPrefix, o.SignatureSuffix)) - - // Write the Signatures image to the tag, with the provided remote.Options - return remoteWrite(tag, sigs, o.ROpt...) -} - -// WriteAttestations publishes the attestations attached to the given entity -// into the provided repository. -func WriteAttestations(repo name.Repository, se oci.SignedEntity, opts ...Option) error { - o := makeOptions(repo, opts...) - - // Access the attestation list to publish - atts, err := se.Attestations() - if err != nil { - return err - } - - // Determine the tag to which these attestations should be published. - h, err := se.Digest() - if err != nil { - return err - } - tag := o.TargetRepository.Tag(normalize(h, o.TagPrefix, o.AttestationSuffix)) - - // Write the Signatures image to the tag, with the provided remote.Options - return remoteWrite(tag, atts, o.ROpt...) -} - -// WriteSignaturesExperimentalOCI publishes the signatures attached to the given entity -// into the provided repository (using OCI 1.1 methods). -func WriteSignaturesExperimentalOCI(d name.Digest, se oci.SignedEntity, opts ...Option) error { - o := makeOptions(d.Repository, opts...) - signTarget := d.String() - ref, err := name.ParseReference(signTarget, o.NameOpts...) - if err != nil { - return err - } - desc, err := remote.Head(ref, o.ROpt...)
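A sketch of the write path: `WriteSignatures` and `WriteAttestations` re-derive the `sha256-*.sig` / `sha256-*.att` tag inside the target repository from the entity's digest. Both repositories here are hypothetical:

```go
package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
)

func main() {
	src, err := name.ParseReference("example.com/app:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	se, err := ociremote.SignedEntity(src)
	if err != nil {
		log.Fatal(err)
	}

	// Copy the entity's signatures and attestations to a mirror repository.
	mirror, err := name.NewRepository("example.com/mirror/app")
	if err != nil {
		log.Fatal(err)
	}
	if err := ociremote.WriteSignatures(mirror, se); err != nil {
		log.Fatal(err)
	}
	if err := ociremote.WriteAttestations(mirror, se); err != nil {
		log.Fatal(err)
	}
}
```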
- if err != nil { - return err - } - sigs, err := se.Signatures() - if err != nil { - return err - } - - // Write the signature blobs - s, err := sigs.Get() - if err != nil { - return err - } - for _, v := range s { - if err := remote.WriteLayer(d.Repository, v, o.ROpt...); err != nil { - return err - } - } - - // Write the config - configBytes, err := sigs.RawConfigFile() - if err != nil { - return err - } - var configDesc v1.Descriptor - if err := json.Unmarshal(configBytes, &configDesc); err != nil { - return err - } - configLayer := static.NewLayer(configBytes, configDesc.MediaType) - if err := remote.WriteLayer(d.Repository, configLayer, o.ROpt...); err != nil { - return err - } - - // Write the manifest containing a subject - b, err := sigs.RawManifest() - if err != nil { - return err - } - var m v1.Manifest - if err := json.Unmarshal(b, &m); err != nil { - return err - } - - artifactType := ociexperimental.ArtifactType("sig") - m.Config.MediaType = types.MediaType(artifactType) - m.Subject = desc - b, err = json.Marshal(&m) - if err != nil { - return err - } - digest, _, err := v1.SHA256(bytes.NewReader(b)) - if err != nil { - return err - } - targetRef, err := name.ParseReference(fmt.Sprintf("%s/%s@%s", d.RegistryStr(), d.RepositoryStr(), digest.String())) - if err != nil { - return err - } - // TODO: use ui.Infof - fmt.Fprintf(os.Stderr, "Uploading signature for [%s] to [%s] with config.mediaType [%s] layers[0].mediaType [%s].\n", - d.String(), targetRef.String(), artifactType, ctypes.SimpleSigningMediaType) - return remote.Put(targetRef, &taggableManifest{raw: b, mediaType: m.MediaType}, o.ROpt...) -} - -type taggableManifest struct { - raw []byte - mediaType types.MediaType -} - -func (taggable taggableManifest) RawManifest() ([]byte, error) { - return taggable.raw, nil -} - -func (taggable taggableManifest) MediaType() (types.MediaType, error) { - return taggable.mediaType, nil -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/signatures.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/signatures.go deleted file mode 100644 index 32f2f890c0..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/signatures.go +++ /dev/null @@ -1,71 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oci - -import ( - "crypto/x509" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/sigstore/cosign/v2/pkg/cosign/bundle" -) - -// Signatures represents a set of signatures that are associated with a particular -// v1.Image. -type Signatures interface { - v1.Image // The low-level representation of the signatures - - // Get retrieves the list of signatures stored. - Get() ([]Signature, error) -} - -// Signature holds a single image signature. -type Signature interface { - v1.Layer - - // Annotations returns the annotations associated with this layer. - Annotations() (map[string]string, error) - - // Payload fetches the opaque data that is being signed. 
- // This will always return data when there is no error. - Payload() ([]byte, error) - - // Signature fetches the raw signature - // of the payload. This will always return data when - // there is no error. - Signature() ([]byte, error) - - // Base64Signature fetches the base64 encoded signature - // of the payload. This will always return data when - // there is no error. - Base64Signature() (string, error) - - // Cert fetches the optional public key from the key pair that - // was used to sign the payload. - Cert() (*x509.Certificate, error) - - // Chain fetches the optional "full certificate chain" rooted - // at a Fulcio CA, the leaf of which was used to sign the - // payload. - Chain() ([]*x509.Certificate, error) - - // Bundle fetches the optional metadata that records the ephemeral - // Fulcio key in the transparency log. - Bundle() (*bundle.RekorBundle, error) - - // RFC3161Timestamp fetches the optional metadata that records a - // RFC3161 signed timestamp. - RFC3161Timestamp() (*bundle.RFC3161Timestamp, error) -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/signed/image.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/signed/image.go deleted file mode 100644 index 2bcade64b0..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/signed/image.go +++ /dev/null @@ -1,53 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package signed - -import ( - "errors" - - v1 "github.com/google/go-containerregistry/pkg/v1" - - "github.com/sigstore/cosign/v2/pkg/oci" - "github.com/sigstore/cosign/v2/pkg/oci/empty" -) - -// Image returns an oci.SignedImage form of the v1.Image with no signatures. -func Image(i v1.Image) oci.SignedImage { - return &image{ - Image: i, - } -} - -type image struct { - v1.Image -} - -var _ oci.SignedImage = (*image)(nil) - -// Signatures implements oci.SignedImage -func (*image) Signatures() (oci.Signatures, error) { - return empty.Signatures(), nil -} - -// Attestations implements oci.SignedImage -func (*image) Attestations() (oci.Signatures, error) { - return empty.Signatures(), nil -} - -// Attachment implements oci.SignedImage -func (*image) Attachment(name string) (oci.File, error) { //nolint: revive - return nil, errors.New("unimplemented") -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/signed/index.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/signed/index.go deleted file mode 100644 index b686b4f62e..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/signed/index.go +++ /dev/null @@ -1,74 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
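A sketch of inspecting the per-signature metadata the `Signature` interface above exposes. This assumes the `RekorBundle` layout in `pkg/cosign/bundle` (a `Payload` struct carrying `LogIndex` and friends), which is not shown in this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
)

func main() {
	ref, err := name.ParseReference("example.com/app:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	img, err := ociremote.SignedImage(ref)
	if err != nil {
		log.Fatal(err)
	}
	sigs, err := img.Signatures()
	if err != nil {
		log.Fatal(err)
	}
	ss, err := sigs.Get()
	if err != nil {
		log.Fatal(err)
	}

	for _, s := range ss {
		// Keyless signatures carry a Fulcio leaf certificate...
		if cert, err := s.Cert(); err == nil && cert != nil {
			fmt.Println("identity:", cert.Subject, cert.EmailAddresses)
		}
		// ...and a Rekor bundle recording the transparency-log entry.
		if b, err := s.Bundle(); err == nil && b != nil {
			fmt.Println("rekor log index:", b.Payload.LogIndex)
		}
	}
}
```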
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package signed - -import ( - "errors" - - v1 "github.com/google/go-containerregistry/pkg/v1" - - "github.com/sigstore/cosign/v2/pkg/oci" - "github.com/sigstore/cosign/v2/pkg/oci/empty" -) - -// ImageIndex returns an oci.SignedImageIndex form of the v1.ImageIndex with -// no signatures. -func ImageIndex(i v1.ImageIndex) oci.SignedImageIndex { - return &index{ - v1Index: i, - } -} - -type v1Index v1.ImageIndex - -type index struct { - v1Index -} - -var _ oci.SignedImageIndex = (*index)(nil) - -// SignedImage implements oci.SignedImageIndex -func (ii *index) SignedImage(h v1.Hash) (oci.SignedImage, error) { - i, err := ii.Image(h) - if err != nil { - return nil, err - } - return Image(i), nil -} - -// SignedImageIndex implements oci.SignedImageIndex -func (ii *index) SignedImageIndex(h v1.Hash) (oci.SignedImageIndex, error) { - i, err := ii.ImageIndex(h) - if err != nil { - return nil, err - } - return ImageIndex(i), nil -} - -// Signatures implements oci.SignedImageIndex -func (*index) Signatures() (oci.Signatures, error) { - return empty.Signatures(), nil -} - -// Attestations implements oci.SignedImageIndex -func (*index) Attestations() (oci.Signatures, error) { - return empty.Signatures(), nil -} - -// Attachment implements oci.SignedImageIndex -func (*index) Attachment(name string) (oci.File, error) { //nolint: revive - return nil, errors.New("unimplemented") -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/file.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/file.go deleted file mode 100644 index 6fc55d8311..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/file.go +++ /dev/null @@ -1,77 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package static - -import ( - "io" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/empty" - "github.com/google/go-containerregistry/pkg/v1/mutate" - "github.com/google/go-containerregistry/pkg/v1/types" - "github.com/sigstore/cosign/v2/pkg/oci" - "github.com/sigstore/cosign/v2/pkg/oci/signed" -) - -// NewFile constructs a new v1.Image with the provided payload. -func NewFile(payload []byte, opts ...Option) (oci.File, error) { - o, err := makeOptions(opts...)
- if err != nil { - return nil, err - } - base := mutate.MediaType(empty.Image, types.OCIManifestSchema1) - base = mutate.ConfigMediaType(base, o.ConfigMediaType) - layer := &staticLayer{ - b: payload, - opts: o, - } - img, err := mutate.Append(base, mutate.Addendum{ - Layer: layer, - }) - if err != nil { - return nil, err - } - - // Add annotations from options - img = mutate.Annotations(img, o.Annotations).(v1.Image) - - return &file{ - SignedImage: signed.Image(img), - layer: layer, - }, nil -} - -type file struct { - oci.SignedImage - layer v1.Layer -} - -var _ oci.File = (*file)(nil) - -// FileMediaType implements oci.File -func (f *file) FileMediaType() (types.MediaType, error) { - return f.layer.MediaType() -} - -// Payload implements oci.File -func (f *file) Payload() ([]byte, error) { - rc, err := f.layer.Uncompressed() - if err != nil { - return nil, err - } - defer rc.Close() - return io.ReadAll(rc) -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/options.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/options.go deleted file mode 100644 index 100c988e98..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/options.go +++ /dev/null @@ -1,114 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package static - -import ( - "encoding/json" - - "github.com/google/go-containerregistry/pkg/v1/types" - "github.com/sigstore/cosign/v2/pkg/cosign/bundle" - ctypes "github.com/sigstore/cosign/v2/pkg/types" -) - -// Option is a functional option for customizing static signatures. -type Option func(*options) - -type options struct { - LayerMediaType types.MediaType - ConfigMediaType types.MediaType - Bundle *bundle.RekorBundle - RFC3161Timestamp *bundle.RFC3161Timestamp - Cert []byte - Chain []byte - Annotations map[string]string -} - -func makeOptions(opts ...Option) (*options, error) { - o := &options{ - LayerMediaType: ctypes.SimpleSigningMediaType, - ConfigMediaType: types.OCIConfigJSON, - Annotations: make(map[string]string), - } - - for _, opt := range opts { - opt(o) - } - - if o.Cert != nil { - o.Annotations[CertificateAnnotationKey] = string(o.Cert) - o.Annotations[ChainAnnotationKey] = string(o.Chain) - } - - if o.Bundle != nil { - b, err := json.Marshal(o.Bundle) - if err != nil { - return nil, err - } - o.Annotations[BundleAnnotationKey] = string(b) - } - - if o.RFC3161Timestamp != nil { - b, err := json.Marshal(o.RFC3161Timestamp) - if err != nil { - return nil, err - } - o.Annotations[RFC3161TimestampAnnotationKey] = string(b) - } - return o, nil -} - -// WithLayerMediaType sets the media type of the signature. -func WithLayerMediaType(mt types.MediaType) Option { - return func(o *options) { - o.LayerMediaType = mt - } -} - -// WithConfigMediaType sets the media type of the signature. 
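`NewFile` above wraps a raw payload in a one-layer OCI image whose layer media type records the payload's format. A sketch using the `SPDXJSONMediaType` constant from `pkg/types` (the local file path is hypothetical):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/sigstore/cosign/v2/pkg/oci/static"
	ctypes "github.com/sigstore/cosign/v2/pkg/types"
)

func main() {
	sbom, err := os.ReadFile("sbom.spdx.json") // hypothetical local SBOM
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the payload so it can be pushed under the <digest>.sbom tag scheme.
	f, err := static.NewFile(sbom, static.WithLayerMediaType(ctypes.SPDXJSONMediaType))
	if err != nil {
		log.Fatal(err)
	}

	mt, _ := f.FileMediaType()
	payload, _ := f.Payload()
	fmt.Printf("file image holds %d bytes of %s\n", len(payload), mt)
}
```

Since the returned `oci.File` wraps a signed single-layer `v1.Image`, it can be pushed with ggcr's `remote.Write` to the tag produced by `SBOMTag`.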
-func WithConfigMediaType(mt types.MediaType) Option { - return func(o *options) { - o.ConfigMediaType = mt - } -} - -// WithAnnotations sets the annotations that will be associated. -func WithAnnotations(ann map[string]string) Option { - return func(o *options) { - o.Annotations = ann - } -} - -// WithBundle sets the bundle to attach to the signature -func WithBundle(b *bundle.RekorBundle) Option { - return func(o *options) { - o.Bundle = b - } -} - -// WithRFC3161Timestamp sets the time-stamping bundle to attach to the signature -func WithRFC3161Timestamp(b *bundle.RFC3161Timestamp) Option { - return func(o *options) { - o.RFC3161Timestamp = b - } -} - -// WithCertChain sets the certificate chain for this signature. -func WithCertChain(cert, chain []byte) Option { - return func(o *options) { - o.Cert = cert - o.Chain = chain - } -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/signature.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/signature.go deleted file mode 100644 index 406386347f..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/signature.go +++ /dev/null @@ -1,217 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package static - -import ( - "bytes" - "crypto/x509" - "encoding/base64" - "io" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" - "github.com/sigstore/cosign/v2/pkg/cosign/bundle" - "github.com/sigstore/cosign/v2/pkg/oci" - "github.com/sigstore/sigstore/pkg/cryptoutils" -) - -const ( - SignatureAnnotationKey = "dev.cosignproject.cosign/signature" - CertificateAnnotationKey = "dev.sigstore.cosign/certificate" - ChainAnnotationKey = "dev.sigstore.cosign/chain" - BundleAnnotationKey = "dev.sigstore.cosign/bundle" - RFC3161TimestampAnnotationKey = "dev.sigstore.cosign/rfc3161timestamp" -) - -// NewSignature constructs a new oci.Signature from the provided options. -func NewSignature(payload []byte, b64sig string, opts ...Option) (oci.Signature, error) { - o, err := makeOptions(opts...) - if err != nil { - return nil, err - } - return &staticLayer{ - b: payload, - b64sig: b64sig, - opts: o, - }, nil -} - -// NewAttestation constructs a new oci.Signature from the provided options. -// Since Attestation is treated just like a Signature but the actual signature -// is baked into the payload, the Signature does not actually have -// the Base64Signature. -func NewAttestation(payload []byte, opts ...Option) (oci.Signature, error) { - return NewSignature(payload, "", opts...) -} - -// Copy constructs a new oci.Signature from the provided one. 
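A sketch of building a signature with `NewSignature` and the options above; the payload and signature bytes are hypothetical stand-ins, and the base64 signature ends up in the `dev.cosignproject.cosign/signature` layer annotation:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/sigstore/cosign/v2/pkg/oci/static"
)

func main() {
	// Hypothetical inputs: a simplesigning payload and a raw signature.
	payload := []byte(`{"critical":{"type":"cosign container image signature"}}`)
	b64sig := base64.StdEncoding.EncodeToString([]byte("not-a-real-signature"))

	// Cert/chain/bundle options, when supplied, also become annotations.
	sig, err := static.NewSignature(payload, b64sig,
		static.WithAnnotations(map[string]string{"purpose": "demo"}),
	)
	if err != nil {
		log.Fatal(err)
	}

	anns, err := sig.Annotations()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(anns[static.SignatureAnnotationKey] == b64sig) // true
}
```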
-func Copy(sig oci.Signature) (oci.Signature, error) { - payload, err := sig.Payload() - if err != nil { - return nil, err - } - b64sig, err := sig.Base64Signature() - if err != nil { - return nil, err - } - var opts []Option - - mt, err := sig.MediaType() - if err != nil { - return nil, err - } - opts = append(opts, WithLayerMediaType(mt)) - - ann, err := sig.Annotations() - if err != nil { - return nil, err - } - opts = append(opts, WithAnnotations(ann)) - - bundle, err := sig.Bundle() - if err != nil { - return nil, err - } - opts = append(opts, WithBundle(bundle)) - - rfc3161Timestamp, err := sig.RFC3161Timestamp() - if err != nil { - return nil, err - } - opts = append(opts, WithRFC3161Timestamp(rfc3161Timestamp)) - - cert, err := sig.Cert() - if err != nil { - return nil, err - } - if cert != nil { - rawCert, err := cryptoutils.MarshalCertificateToPEM(cert) - if err != nil { - return nil, err - } - chain, err := sig.Chain() - if err != nil { - return nil, err - } - rawChain, err := cryptoutils.MarshalCertificatesToPEM(chain) - if err != nil { - return nil, err - } - opts = append(opts, WithCertChain(rawCert, rawChain)) - } - return NewSignature(payload, b64sig, opts...) -} - -type staticLayer struct { - b []byte - b64sig string - opts *options -} - -var _ v1.Layer = (*staticLayer)(nil) -var _ oci.Signature = (*staticLayer)(nil) - -// Annotations implements oci.Signature -func (l *staticLayer) Annotations() (map[string]string, error) { - m := make(map[string]string, len(l.opts.Annotations)+1) - for k, v := range l.opts.Annotations { - m[k] = v - } - m[SignatureAnnotationKey] = l.b64sig - return m, nil -} - -// Payload implements oci.Signature -func (l *staticLayer) Payload() ([]byte, error) { - return l.b, nil -} - -// Signature implements oci.Signature -func (l *staticLayer) Signature() ([]byte, error) { - b64sig, err := l.Base64Signature() - if err != nil { - return nil, err - } - return base64.StdEncoding.DecodeString(b64sig) -} - -// Base64Signature implements oci.Signature -func (l *staticLayer) Base64Signature() (string, error) { - return l.b64sig, nil -} - -// Cert implements oci.Signature -func (l *staticLayer) Cert() (*x509.Certificate, error) { - certs, err := cryptoutils.LoadCertificatesFromPEM(bytes.NewReader(l.opts.Cert)) - if err != nil { - return nil, err - } - if len(certs) == 0 { - return nil, nil - } - return certs[0], nil -} - -// Chain implements oci.Signature -func (l *staticLayer) Chain() ([]*x509.Certificate, error) { - certs, err := cryptoutils.LoadCertificatesFromPEM(bytes.NewReader(l.opts.Chain)) - if err != nil { - return nil, err - } - return certs, nil -} - -// Bundle implements oci.Signature -func (l *staticLayer) Bundle() (*bundle.RekorBundle, error) { - return l.opts.Bundle, nil -} - -// RFC3161Timestamp implements oci.Signature -func (l *staticLayer) RFC3161Timestamp() (*bundle.RFC3161Timestamp, error) { - return l.opts.RFC3161Timestamp, nil -} - -// Digest implements v1.Layer -func (l *staticLayer) Digest() (v1.Hash, error) { - h, _, err := v1.SHA256(bytes.NewReader(l.b)) - return h, err -} - -// DiffID implements v1.Layer -func (l *staticLayer) DiffID() (v1.Hash, error) { - h, _, err := v1.SHA256(bytes.NewReader(l.b)) - return h, err -} - -// Compressed implements v1.Layer -func (l *staticLayer) Compressed() (io.ReadCloser, error) { - return io.NopCloser(bytes.NewReader(l.b)), nil -} - -// Uncompressed implements v1.Layer -func (l *staticLayer) Uncompressed() (io.ReadCloser, error) { - return io.NopCloser(bytes.NewReader(l.b)), nil -} - -// Size 
implements v1.Layer -func (l *staticLayer) Size() (int64, error) { - return int64(len(l.b)), nil -} - -// MediaType implements v1.Layer -func (l *staticLayer) MediaType() (types.MediaType, error) { - return l.opts.LayerMediaType, nil -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/walk/walk.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/walk/walk.go deleted file mode 100644 index 097d05bfa3..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/walk/walk.go +++ /dev/null @@ -1,41 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package walk - -import ( - "context" - - "github.com/sigstore/cosign/v2/pkg/oci" - "github.com/sigstore/cosign/v2/pkg/oci/mutate" -) - -// Fn is the signature of the callback supplied to SignedEntity. -// The oci.SignedEntity is either an oci.SignedImageIndex or an oci.SignedImage. -// This callback is called on oci.SignedImageIndex *before* its children. -type Fn func(context.Context, oci.SignedEntity) error - -// SignedEntity calls `fn` on the signed entity and each of its constituent entities -// (`SignedImageIndex` or `SignedImage`) transitively. -// Any errors returned by an `fn` are returned by `Walk`. -func SignedEntity(ctx context.Context, parent oci.SignedEntity, fn Fn) error { - _, err := mutate.Map(ctx, parent, func(ctx context.Context, se oci.SignedEntity) (oci.SignedEntity, error) { - if err := fn(ctx, se); err != nil { - return nil, err - } - return se, nil - }) - return err -} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/types/media.go b/vendor/github.com/sigstore/cosign/v2/pkg/types/media.go deleted file mode 100644 index 4c9f6f4c80..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/types/media.go +++ /dev/null @@ -1,33 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
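A sketch of `walk.SignedEntity` from the file above: the callback fires for an index before each of its children, so a multi-arch image visits one index plus one entity per manifest (the reference is hypothetical):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/sigstore/cosign/v2/pkg/oci"
	ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
	"github.com/sigstore/cosign/v2/pkg/oci/walk"
)

func main() {
	ref, err := name.ParseReference("example.com/app:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	se, err := ociremote.SignedEntity(ref)
	if err != nil {
		log.Fatal(err)
	}

	// Count signatures on the top-level entity and each constituent image.
	err = walk.SignedEntity(context.Background(), se, func(_ context.Context, e oci.SignedEntity) error {
		h, err := e.Digest()
		if err != nil {
			return err
		}
		sigs, err := e.Signatures()
		if err != nil {
			return err
		}
		ss, err := sigs.Get()
		if err != nil {
			return err
		}
		fmt.Printf("%s: %d signature(s)\n", h, len(ss))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```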
- -package types - -const ( - JSONInputFormat = "json" - XMLInputFormat = "xml" - TextInputFormat = "text" -) - -const ( - CycloneDXXMLMediaType = "application/vnd.cyclonedx+xml" - CycloneDXJSONMediaType = "application/vnd.cyclonedx+json" - SyftMediaType = "application/vnd.syft+json" - SimpleSigningMediaType = "application/vnd.dev.cosign.simplesigning.v1+json" - SPDXMediaType = "text/spdx" - SPDXJSONMediaType = "text/spdx+json" - WasmLayerMediaType = "application/vnd.wasm.content.layer.v1+wasm" - WasmConfigMediaType = "application/vnd.wasm.config.v1+json" -) diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/types/payload.go b/vendor/github.com/sigstore/cosign/v2/pkg/types/payload.go deleted file mode 100644 index 7dfa8dd49b..0000000000 --- a/vendor/github.com/sigstore/cosign/v2/pkg/types/payload.go +++ /dev/null @@ -1,21 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -const ( - DssePayloadType = "application/vnd.dsse.envelope.v1+json" - IntotoPayloadType = "application/vnd.in-toto+json" -) diff --git a/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md b/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md deleted file mode 100644 index 0113fcadc8..0000000000 --- a/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md +++ /dev/null @@ -1,116 +0,0 @@ -# Contributing - -When contributing to this repository, please first discuss the change you wish -to make via an [issue](https://github.com/sigstore/rekor/issues). - -## Pull Request Process - -1. Create an [issue](https://github.com/sigstore/rekor/issues) - outlining the fix or feature. -2. Fork the rekor repository to your own github account and clone it locally. -3. Hack on your changes. -4. Update the README.md with details of changes to any interface; this includes new environment - variables, exposed ports, useful file locations, CLI parameters and - new or changed configuration values. -5. Correctly format your commit message; see [Commit Messages](#commit-message-guidelines) - below. -6. Ensure that CI passes; if it fails, fix the failures. -7. Every pull request requires a review from the [core rekor team](https://github.com/orgs/sigstore/teams/core-team) - before merging. -8. If your pull request consists of more than one commit, please squash your - commits as described in [Squash Commits](#squash-commits). - -## Commit Message Guidelines - -We follow the commit formatting recommendations found in [Chris Beams' How to Write a Git Commit Message article](https://chris.beams.io/posts/git-commit/). - -Well formed commit messages not only help reviewers understand the nature of -the Pull Request, but also assist the release process where commit messages -are used to generate release notes. - -A good example of a commit message would be as follows: - -``` -Summarize changes in around 50 characters or less - -More detailed explanatory text, if necessary.
Wrap it to about 72 -characters or so. In some contexts, the first line is treated as the -subject of the commit and the rest of the text as the body. The -blank line separating the summary from the body is critical (unless -you omit the body entirely); various tools like `log`, `shortlog` -and `rebase` can get confused if you run the two together. - -Explain the problem that this commit is solving. Focus on why you -are making this change as opposed to how (the code explains that). -Are there side effects or other unintuitive consequences of this -change? Here's the place to explain them. - -Further paragraphs come after blank lines. - - - Bullet points are okay, too - - - Typically a hyphen or asterisk is used for the bullet, preceded - by a single space, with blank lines in between, but conventions - vary here - -If you use an issue tracker, put references to them at the bottom, -like this: - -Resolves: #123 -See also: #456, #789 -``` - -Note the `Resolves #123` tag; this references the issue raised and allows us to -ensure issues are associated and closed when a pull request is merged. - -Please refer to [the github help page on message types](https://help.github.com/articles/closing-issues-using-keywords/) -for a complete list of issue references. - -## Squash Commits - -Should your pull request consist of more than one commit (perhaps due to -a change being requested during the review cycle), please perform a git squash -once a reviewer has approved your pull request. - -A squash can be performed as follows. Let's say you have the following commits: - - initial commit - second commit - final commit - -Run the command below with the number set to the total commits you wish to -squash (in our case 3 commits): - - git rebase -i HEAD~3 - -Your default text editor will then open up and you will see the following: - - pick eb36612 initial commit - pick 9ac8968 second commit - pick a760569 final commit - - # Rebase eb1429f..a760569 onto eb1429f (3 commands) - -We want to rebase on top of our first commit, so we change the other two commits -to `squash`: - - pick eb36612 initial commit - squash 9ac8968 second commit - squash a760569 final commit - -After this, should you wish to update your commit message to better summarise -all of your pull request, run: - - git commit --amend - -You will then need to force push (assuming your initial commit(s) were posted -to github): - - git push origin your-branch --force - -Alternatively, a core member can squash your commits within Github. -## Code of Conduct - -Rekor adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct. -Please take a moment to read the [CODE_OF_CONDUCT.md](https://github.com/sigstore/rekor/blob/master/CODE_OF_CONDUCT.md) document. - diff --git a/vendor/github.com/sigstore/rekor/COPYRIGHT.txt b/vendor/github.com/sigstore/rekor/COPYRIGHT.txt deleted file mode 100644 index 7a01c84986..0000000000 --- a/vendor/github.com/sigstore/rekor/COPYRIGHT.txt +++ /dev/null @@ -1,14 +0,0 @@ - -Copyright 2021 The Sigstore Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/sigstore/rekor/LICENSE b/vendor/github.com/sigstore/rekor/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/sigstore/rekor/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go deleted file mode 100644 index 5607679fdf..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Alpine Alpine package -// -// swagger:model alpine -type Alpine struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec AlpineSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Alpine) Kind() string { - return "alpine" -} - -// SetKind sets the kind of this subtype -func (m *Alpine) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Alpine) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec AlpineSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. 
Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Alpine - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Alpine) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec AlpineSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this alpine -func (m *Alpine) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Alpine) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Alpine) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this alpine based on the context it is used -func (m *Alpine) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Alpine) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Alpine) UnmarshalBinary(b []byte) error { - var res Alpine - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go deleted file mode 100644 index edd25408bb..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// AlpineSchema Alpine Package Schema -// -// # Schema for Alpine package objects -// -// swagger:model alpineSchema -type AlpineSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go deleted file mode 100644 index 250a6125b3..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go +++ /dev/null @@ -1,455 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// AlpineV001Schema Alpine v0.0.1 Schema -// -// # Schema for Alpine Package entries -// -// swagger:model alpineV001Schema -type AlpineV001Schema struct { - - // package - // Required: true - Package *AlpineV001SchemaPackage `json:"package"` - - // public key - // Required: true - PublicKey *AlpineV001SchemaPublicKey `json:"publicKey"` -} - -// Validate validates this alpine v001 schema -func (m *AlpineV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePackage(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
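The deleted alpine.go above (and the cose.go and dsse.go files removed later in this diff) all follow the same go-swagger discriminator pattern: the raw JSON is decoded twice, once into the typed fields and once into a struct holding only the kind field, which must equal the subtype's fixed Kind() value. A minimal stdlib-only sketch of that two-pass decode follows; the entry type and decodeKinded helper are illustrative names, not part of the removed models.

package main

import (
	"encoding/json"
	"fmt"
)

// entry mirrors the typed fields shared by the generated subtypes
// (illustrative; not one of the removed models).
type entry struct {
	APIVersion string          `json:"apiVersion"`
	Spec       json.RawMessage `json:"spec"`
}

// decodeKinded performs the two-pass decode: first read only the
// "kind" discriminator, reject a mismatch, then decode the fields.
func decodeKinded(raw []byte, wantKind string) (*entry, error) {
	var base struct {
		Kind string `json:"kind"`
	}
	if err := json.Unmarshal(raw, &base); err != nil {
		return nil, err
	}
	if base.Kind != wantKind {
		// the generated code returns a 422 via errors.New here
		return nil, fmt.Errorf("invalid kind value: %q", base.Kind)
	}
	var e entry
	if err := json.Unmarshal(raw, &e); err != nil {
		return nil, err
	}
	return &e, nil
}

func main() {
	raw := []byte(`{"kind":"alpine","apiVersion":"0.0.1","spec":{}}`)
	e, err := decodeKinded(raw, "alpine")
	fmt.Printf("%+v %v\n", e, err)
}

The generated code additionally calls dec.UseNumber() on each pass so numeric JSON fields survive as json.Number; the sketch omits that detail.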
- } - return nil -} - -func (m *AlpineV001Schema) validatePackage(formats strfmt.Registry) error { - - if err := validate.Required("package", "body", m.Package); err != nil { - return err - } - - if m.Package != nil { - if err := m.Package.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package") - } - return err - } - } - - return nil -} - -func (m *AlpineV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this alpine v001 schema based on the context it is used -func (m *AlpineV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePackage(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *AlpineV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error { - - if m.Package != nil { - - if err := m.Package.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package") - } - return err - } - } - - return nil -} - -func (m *AlpineV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *AlpineV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *AlpineV001Schema) UnmarshalBinary(b []byte) error { - var res AlpineV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// AlpineV001SchemaPackage Information about the package associated with the entry -// -// swagger:model AlpineV001SchemaPackage -type AlpineV001SchemaPackage struct { - - // Specifies the package inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // hash - Hash *AlpineV001SchemaPackageHash `json:"hash,omitempty"` - - // Values of the .PKGINFO key / value pairs - // Read Only: true - Pkginfo map[string]string `json:"pkginfo,omitempty"` -} - -// Validate validates this alpine v001 schema package -func (m *AlpineV001SchemaPackage) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *AlpineV001SchemaPackage) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this alpine v001 schema package based on the context it is used -func (m *AlpineV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePkginfo(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *AlpineV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *AlpineV001SchemaPackage) contextValidatePkginfo(ctx context.Context, formats strfmt.Registry) error { - - return nil -} - -// MarshalBinary interface implementation -func (m *AlpineV001SchemaPackage) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *AlpineV001SchemaPackage) UnmarshalBinary(b []byte) error { - var res AlpineV001SchemaPackage - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// AlpineV001SchemaPackageHash Specifies the hash algorithm and value for the package -// -// swagger:model AlpineV001SchemaPackageHash -type AlpineV001SchemaPackageHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the package - // Required: true - Value *string `json:"value"` -} - -// Validate validates this alpine v001 schema package hash -func (m *AlpineV001SchemaPackageHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var alpineV001SchemaPackageHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - alpineV001SchemaPackageHashTypeAlgorithmPropEnum = append(alpineV001SchemaPackageHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // AlpineV001SchemaPackageHashAlgorithmSha256 captures enum value "sha256" - AlpineV001SchemaPackageHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *AlpineV001SchemaPackageHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, alpineV001SchemaPackageHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *AlpineV001SchemaPackageHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("package"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("package"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *AlpineV001SchemaPackageHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("package"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this alpine v001 schema package hash based on the context it is used -func (m *AlpineV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *AlpineV001SchemaPackageHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *AlpineV001SchemaPackageHash) UnmarshalBinary(b []byte) error { - var res AlpineV001SchemaPackageHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// AlpineV001SchemaPublicKey The public key that can verify the package signature -// -// swagger:model AlpineV001SchemaPublicKey -type AlpineV001SchemaPublicKey struct { - - // Specifies the content of the public key inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this alpine v001 schema public key -func (m *AlpineV001SchemaPublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
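Each of the deleted hash models gates its algorithm field through an enum slice built in init() from the JSON literal ["sha256"] and checked with validate.EnumCase in case-sensitive mode. Stripped of the go-openapi plumbing, the check reduces to a set-membership test; a hedged stdlib sketch (validateAlgorithm is an illustrative name):

package main

import "fmt"

// allowedAlgorithms mirrors the ["sha256"] enum literal that the
// generated init() functions unmarshal into their *PropEnum slices.
var allowedAlgorithms = map[string]bool{"sha256": true}

// validateAlgorithm stands in for validate.EnumCase with
// caseSensitive=true: the value must match an enum member exactly.
func validateAlgorithm(path, value string) error {
	if !allowedAlgorithms[value] {
		return fmt.Errorf("%s: %q is not an allowed algorithm", path, value)
	}
	return nil
}

func main() {
	fmt.Println(validateAlgorithm("package.hash.algorithm", "sha256")) // <nil>
	fmt.Println(validateAlgorithm("package.hash.algorithm", "SHA256")) // rejected: case-sensitive
}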
- } - return nil -} - -func (m *AlpineV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this alpine v001 schema public key based on context it is used -func (m *AlpineV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *AlpineV001SchemaPublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *AlpineV001SchemaPublicKey) UnmarshalBinary(b []byte) error { - var res AlpineV001SchemaPublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go deleted file mode 100644 index 804ddd11a9..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go +++ /dev/null @@ -1,118 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// ConsistencyProof consistency proof -// -// swagger:model ConsistencyProof -type ConsistencyProof struct { - - // hashes - // Required: true - Hashes []string `json:"hashes"` - - // The hash value stored at the root of the merkle tree at the time the proof was generated - // Required: true - // Pattern: ^[0-9a-fA-F]{64}$ - RootHash *string `json:"rootHash"` -} - -// Validate validates this consistency proof -func (m *ConsistencyProof) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHashes(formats); err != nil { - res = append(res, err) - } - - if err := m.validateRootHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
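The ConsistencyProof model deleted above constrains rootHash, and each element of hashes in the validateHashes loop that follows, to the pattern ^[0-9a-fA-F]{64}$, i.e. a 64-character hex SHA-256 digest. A stdlib sketch of the same per-element check (validateProofHashes is an illustrative name):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// sha256Hex is the ^[0-9a-fA-F]{64}$ pattern applied to rootHash and
// to every element of hashes in the deleted ConsistencyProof model.
var sha256Hex = regexp.MustCompile(`^[0-9a-fA-F]{64}$`)

// validateProofHashes applies the per-element check the generated
// validateHashes loop performs, reporting the failing index the way
// the generated code builds paths with "hashes." + strconv.Itoa(i).
func validateProofHashes(hashes []string) error {
	for i, h := range hashes {
		if !sha256Hex.MatchString(h) {
			return fmt.Errorf("hashes.%d: %q is not a 64-char hex digest", i, h)
		}
	}
	return nil
}

func main() {
	good := strings.Repeat("ab", 32) // 64 hex characters
	fmt.Println(validateProofHashes([]string{good}))         // <nil>
	fmt.Println(validateProofHashes([]string{good, "nope"})) // fails at index 1
}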
- } - return nil -} - -func (m *ConsistencyProof) validateHashes(formats strfmt.Registry) error { - - if err := validate.Required("hashes", "body", m.Hashes); err != nil { - return err - } - - for i := 0; i < len(m.Hashes); i++ { - - if err := validate.Pattern("hashes"+"."+strconv.Itoa(i), "body", m.Hashes[i], `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - } - - return nil -} - -func (m *ConsistencyProof) validateRootHash(formats strfmt.Registry) error { - - if err := validate.Required("rootHash", "body", m.RootHash); err != nil { - return err - } - - if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this consistency proof based on context it is used -func (m *ConsistencyProof) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *ConsistencyProof) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *ConsistencyProof) UnmarshalBinary(b []byte) error { - var res ConsistencyProof - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go deleted file mode 100644 index 8de4083baf..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Cose COSE object -// -// swagger:model cose -type Cose struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec CoseSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Cose) Kind() string { - return "cose" -} - -// SetKind sets the kind of this subtype -func (m *Cose) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Cose) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec CoseSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Cose - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Cose) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec CoseSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this cose -func (m *Cose) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
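Every removed kind applies the same semver 2.0.0 pattern to apiVersion. The pattern uses only non-capturing groups and character classes, so it compiles unchanged under Go's RE2 regexp engine; a small sketch:

package main

import (
	"fmt"
	"regexp"
)

// apiVersionRE is the semver 2.0.0 pattern that every deleted kind
// (alpine, cose, dsse) applies to its apiVersion field.
var apiVersionRE = regexp.MustCompile(`^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`)

func main() {
	for _, v := range []string{"0.0.1", "1.2.3-rc.1+build.5", "01.0.0"} {
		fmt.Printf("%-20s %v\n", v, apiVersionRE.MatchString(v)) // 01.0.0 fails: leading zero
	}
}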
- } - return nil -} - -func (m *Cose) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Cose) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this cose based on the context it is used -func (m *Cose) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Cose) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Cose) UnmarshalBinary(b []byte) error { - var res Cose - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go deleted file mode 100644 index e653f22029..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// CoseSchema COSE Schema -// -// # COSE for Rekord objects -// -// swagger:model coseSchema -type CoseSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go deleted file mode 100644 index 93dce8715a..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go +++ /dev/null @@ -1,521 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// CoseV001Schema cose v0.0.1 Schema -// -// # Schema for cose object -// -// swagger:model coseV001Schema -type CoseV001Schema struct { - - // data - Data *CoseV001SchemaData `json:"data,omitempty"` - - // The COSE Sign1 Message - // Format: byte - Message strfmt.Base64 `json:"message,omitempty"` - - // The public key that can verify the signature - // Required: true - // Format: byte - PublicKey *strfmt.Base64 `json:"publicKey"` -} - -// Validate validates this cose v001 schema -func (m *CoseV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateData(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *CoseV001Schema) validateData(formats strfmt.Registry) error { - if swag.IsZero(m.Data) { // not required - return nil - } - - if m.Data != nil { - if err := m.Data.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *CoseV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this cose v001 schema based on the context it is used -func (m *CoseV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateData(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *CoseV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { - - if m.Data != nil { - - if swag.IsZero(m.Data) { // not required - return nil - } - - if err := m.Data.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *CoseV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CoseV001Schema) UnmarshalBinary(b []byte) error { - var res CoseV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// CoseV001SchemaData Information about the content associated with the entry -// -// swagger:model CoseV001SchemaData -type CoseV001SchemaData struct { - - // Specifies the additional authenticated data required to verify the signature - // Format: byte - Aad strfmt.Base64 `json:"aad,omitempty"` - - // envelope hash - EnvelopeHash *CoseV001SchemaDataEnvelopeHash `json:"envelopeHash,omitempty"` - - // payload hash - PayloadHash *CoseV001SchemaDataPayloadHash `json:"payloadHash,omitempty"` -} - -// Validate validates this cose v001 schema data -func (m *CoseV001SchemaData) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEnvelopeHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePayloadHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *CoseV001SchemaData) validateEnvelopeHash(formats strfmt.Registry) error { - if swag.IsZero(m.EnvelopeHash) { // not required - return nil - } - - if m.EnvelopeHash != nil { - if err := m.EnvelopeHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "envelopeHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "envelopeHash") - } - return err - } - } - - return nil -} - -func (m *CoseV001SchemaData) validatePayloadHash(formats strfmt.Registry) error { - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if m.PayloadHash != nil { - if err := m.PayloadHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "payloadHash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this cose v001 schema data based on the context it is used -func (m *CoseV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePayloadHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
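The CoseV001SchemaData model above pairs each hash with the fixed "sha256" algorithm enum value. Assuming the digest value is carried as lowercase hex (consistent with the 64-hex-character patterns elsewhere in these models, though this file's value field itself is unpatterned), a sketch of producing a matching payload digest (payloadDigest is an illustrative helper):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// payloadDigest returns the algorithm/value pair a client would place
// in the payloadHash object: the fixed "sha256" enum value and the
// lowercase hex digest of the payload (hex encoding assumed).
func payloadDigest(payload []byte) (algorithm, value string) {
	sum := sha256.Sum256(payload)
	return "sha256", hex.EncodeToString(sum[:])
}

func main() {
	alg, val := payloadDigest([]byte("example payload"))
	fmt.Println(alg, val)
}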
- } - return nil -} - -func (m *CoseV001SchemaData) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { - - if m.EnvelopeHash != nil { - - if swag.IsZero(m.EnvelopeHash) { // not required - return nil - } - - if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "envelopeHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "envelopeHash") - } - return err - } - } - - return nil -} - -func (m *CoseV001SchemaData) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { - - if m.PayloadHash != nil { - - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "payloadHash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *CoseV001SchemaData) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CoseV001SchemaData) UnmarshalBinary(b []byte) error { - var res CoseV001SchemaData - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// CoseV001SchemaDataEnvelopeHash Specifies the hash algorithm and value for the COSE envelope -// -// swagger:model CoseV001SchemaDataEnvelopeHash -type CoseV001SchemaDataEnvelopeHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the envelope - // Required: true - Value *string `json:"value"` -} - -// Validate validates this cose v001 schema data envelope hash -func (m *CoseV001SchemaDataEnvelopeHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum = append(coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // CoseV001SchemaDataEnvelopeHashAlgorithmSha256 captures enum value "sha256" - CoseV001SchemaDataEnvelopeHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("data"+"."+"envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *CoseV001SchemaDataEnvelopeHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"envelopeHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this cose v001 schema data envelope hash based on the context it is used -func (m *CoseV001SchemaDataEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *CoseV001SchemaDataEnvelopeHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CoseV001SchemaDataEnvelopeHash) UnmarshalBinary(b []byte) error { - var res CoseV001SchemaDataEnvelopeHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// CoseV001SchemaDataPayloadHash Specifies the hash algorithm and value for the content -// -// swagger:model CoseV001SchemaDataPayloadHash -type CoseV001SchemaDataPayloadHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the content - // Required: true - Value *string `json:"value"` -} - -// Validate validates this cose v001 schema data payload hash -func (m *CoseV001SchemaDataPayloadHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum = append(coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // CoseV001SchemaDataPayloadHashAlgorithmSha256 captures enum value "sha256" - CoseV001SchemaDataPayloadHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *CoseV001SchemaDataPayloadHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *CoseV001SchemaDataPayloadHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("data"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *CoseV001SchemaDataPayloadHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this cose v001 schema data payload hash based on the context it is used -func (m *CoseV001SchemaDataPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *CoseV001SchemaDataPayloadHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CoseV001SchemaDataPayloadHash) UnmarshalBinary(b []byte) error { - var res CoseV001SchemaDataPayloadHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go deleted file mode 100644 index dde562054c..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// DSSE DSSE envelope -// -// swagger:model dsse -type DSSE struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec DSSESchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *DSSE) Kind() string { - return "dsse" -} - -// SetKind sets the kind of this subtype -func (m *DSSE) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *DSSE) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec DSSESchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result DSSE - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m DSSE) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec DSSESchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this dsse -func (m *DSSE) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *DSSE) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *DSSE) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this dsse based on the context it is used -func (m *DSSE) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *DSSE) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSE) UnmarshalBinary(b []byte) error { - var res DSSE - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go deleted file mode 100644 index 7795626438..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// DSSESchema DSSE Schema -// -// log entry schema for dsse envelopes -// -// swagger:model dsseSchema -type DSSESchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go deleted file mode 100644 index ec4c32bfb1..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go +++ /dev/null @@ -1,685 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// DSSEV001Schema DSSE v0.0.1 Schema -// -// # Schema for DSSE envelopes -// -// swagger:model dsseV001Schema -type DSSEV001Schema struct { - - // envelope hash - EnvelopeHash *DSSEV001SchemaEnvelopeHash `json:"envelopeHash,omitempty"` - - // payload hash - PayloadHash *DSSEV001SchemaPayloadHash `json:"payloadHash,omitempty"` - - // proposed content - ProposedContent *DSSEV001SchemaProposedContent `json:"proposedContent,omitempty"` - - // extracted collection of all signatures of the envelope's payload; elements will be sorted by lexicographical order of the base64 encoded signature strings - // Read Only: true - // Min Items: 1 - Signatures []*DSSEV001SchemaSignaturesItems0 `json:"signatures"` -} - -// Validate validates this dsse v001 schema -func (m *DSSEV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEnvelopeHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePayloadHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateProposedContent(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignatures(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *DSSEV001Schema) validateEnvelopeHash(formats strfmt.Registry) error { - if swag.IsZero(m.EnvelopeHash) { // not required - return nil - } - - if m.EnvelopeHash != nil { - if err := m.EnvelopeHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("envelopeHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("envelopeHash") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) validatePayloadHash(formats strfmt.Registry) error { - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if m.PayloadHash != nil { - if err := m.PayloadHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("payloadHash") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) validateProposedContent(formats strfmt.Registry) error { - if swag.IsZero(m.ProposedContent) { // not required - return nil - } - - if m.ProposedContent != nil { - if err := m.ProposedContent.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("proposedContent") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("proposedContent") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) validateSignatures(formats strfmt.Registry) error { - if swag.IsZero(m.Signatures) { // not required - return nil - } - - iSignaturesSize := int64(len(m.Signatures)) - - if err := validate.MinItems("signatures", "body", iSignaturesSize, 1); err != nil { - return err - } - - for i := 0; i < len(m.Signatures); i++ { - if 
swag.IsZero(m.Signatures[i]) { // not required - continue - } - - if m.Signatures[i] != nil { - if err := m.Signatures[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signatures" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signatures" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// ContextValidate validate this dsse v001 schema based on the context it is used -func (m *DSSEV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePayloadHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateProposedContent(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSignatures(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *DSSEV001Schema) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { - - if m.EnvelopeHash != nil { - - if swag.IsZero(m.EnvelopeHash) { // not required - return nil - } - - if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("envelopeHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("envelopeHash") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { - - if m.PayloadHash != nil { - - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("payloadHash") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) contextValidateProposedContent(ctx context.Context, formats strfmt.Registry) error { - - if m.ProposedContent != nil { - - if swag.IsZero(m.ProposedContent) { // not required - return nil - } - - if err := m.ProposedContent.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("proposedContent") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("proposedContent") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error { - - if err := validate.ReadOnly(ctx, "signatures", "body", []*DSSEV001SchemaSignaturesItems0(m.Signatures)); err != nil { - return err - } - - for i := 0; i < len(m.Signatures); i++ { - - if m.Signatures[i] != nil { - - if swag.IsZero(m.Signatures[i]) { // not required - return nil - } - - if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signatures" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signatures" + "." 
+ strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001Schema) UnmarshalBinary(b []byte) error { - var res DSSEV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// DSSEV001SchemaEnvelopeHash Specifies the hash algorithm and value encompassing the entire envelope sent to Rekor -// -// swagger:model DSSEV001SchemaEnvelopeHash -type DSSEV001SchemaEnvelopeHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The value of the computed digest over the entire envelope - // Required: true - Value *string `json:"value"` -} - -// Validate validates this DSSE v001 schema envelope hash -func (m *DSSEV001SchemaEnvelopeHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum = append(dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // DSSEV001SchemaEnvelopeHashAlgorithmSha256 captures enum value "sha256" - DSSEV001SchemaEnvelopeHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *DSSEV001SchemaEnvelopeHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("envelopeHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this DSSE v001 schema envelope hash based on the context it is used -func (m *DSSEV001SchemaEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001SchemaEnvelopeHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001SchemaEnvelopeHash) UnmarshalBinary(b []byte) error { - var res DSSEV001SchemaEnvelopeHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// DSSEV001SchemaPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope -// -// swagger:model DSSEV001SchemaPayloadHash -type DSSEV001SchemaPayloadHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The value of the computed digest over the payload within the envelope - // Required: true - Value *string `json:"value"` -} - -// Validate validates this DSSE v001 schema payload hash -func (m *DSSEV001SchemaPayloadHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var dsseV001SchemaPayloadHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - dsseV001SchemaPayloadHashTypeAlgorithmPropEnum = append(dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // DSSEV001SchemaPayloadHashAlgorithmSha256 captures enum value "sha256" - DSSEV001SchemaPayloadHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *DSSEV001SchemaPayloadHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *DSSEV001SchemaPayloadHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *DSSEV001SchemaPayloadHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("payloadHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this DSSE v001 schema payload hash based on the context it is used -func (m *DSSEV001SchemaPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001SchemaPayloadHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001SchemaPayloadHash) UnmarshalBinary(b []byte) error { - var res DSSEV001SchemaPayloadHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// DSSEV001SchemaProposedContent DSSE v001 schema proposed content -// -// swagger:model DSSEV001SchemaProposedContent -type DSSEV001SchemaProposedContent struct { - - // DSSE envelope specified as a stringified JSON object - // Required: true - Envelope *string `json:"envelope"` - - // collection of all verification material (e.g. public keys or certificates) used to verify signatures over envelope's payload, specified as base64-encoded strings - // Required: true - // Min Items: 1 - Verifiers []strfmt.Base64 `json:"verifiers"` -} - -// Validate validates this DSSE v001 schema proposed content -func (m *DSSEV001SchemaProposedContent) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEnvelope(formats); err != nil { - res = append(res, err) - } - - if err := m.validateVerifiers(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *DSSEV001SchemaProposedContent) validateEnvelope(formats strfmt.Registry) error { - - if err := validate.Required("proposedContent"+"."+"envelope", "body", m.Envelope); err != nil { - return err - } - - return nil -} - -func (m *DSSEV001SchemaProposedContent) validateVerifiers(formats strfmt.Registry) error { - - if err := validate.Required("proposedContent"+"."+"verifiers", "body", m.Verifiers); err != nil { - return err - } - - iVerifiersSize := int64(len(m.Verifiers)) - - if err := validate.MinItems("proposedContent"+"."+"verifiers", "body", iVerifiersSize, 1); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this DSSE v001 schema proposed content based on context it is used -func (m *DSSEV001SchemaProposedContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001SchemaProposedContent) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001SchemaProposedContent) UnmarshalBinary(b []byte) error { - var res DSSEV001SchemaProposedContent - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// DSSEV001SchemaSignaturesItems0 a signature of the envelope's payload along with the verification material for the signature -// -// swagger:model DSSEV001SchemaSignaturesItems0 -type DSSEV001SchemaSignaturesItems0 struct { - - // base64 encoded signature of the payload - // Required: true - // Pattern: ^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$ - Signature *string `json:"signature"` - - // verification material that was used to verify the corresponding signature, specified as a base64 encoded string - // Required: true - // Format: byte - Verifier *strfmt.Base64 `json:"verifier"` -} - -// Validate validates this DSSE v001 schema signatures items0 -func (m *DSSEV001SchemaSignaturesItems0) Validate(formats strfmt.Registry) error { - var res []error - - if err := 
m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if err := m.validateVerifier(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *DSSEV001SchemaSignaturesItems0) validateSignature(formats strfmt.Registry) error { - - if err := validate.Required("signature", "body", m.Signature); err != nil { - return err - } - - if err := validate.Pattern("signature", "body", *m.Signature, `^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$`); err != nil { - return err - } - - return nil -} - -func (m *DSSEV001SchemaSignaturesItems0) validateVerifier(formats strfmt.Registry) error { - - if err := validate.Required("verifier", "body", m.Verifier); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this DSSE v001 schema signatures items0 based on context it is used -func (m *DSSEV001SchemaSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001SchemaSignaturesItems0) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001SchemaSignaturesItems0) UnmarshalBinary(b []byte) error { - var res DSSEV001SchemaSignaturesItems0 - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go deleted file mode 100644 index ac14f2026e..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go +++ /dev/null @@ -1,69 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// Error error -// -// swagger:model Error -type Error struct { - - // code - Code int64 `json:"code,omitempty"` - - // message - Message string `json:"message,omitempty"` -} - -// Validate validates this error -func (m *Error) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this error based on context it is used -func (m *Error) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *Error) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Error) UnmarshalBinary(b []byte) error { - var res Error - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go deleted file mode 100644 index b3e1f8a3bd..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Hashedrekord Hashed Rekord object -// -// swagger:model hashedrekord -type Hashedrekord struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HashedrekordSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Hashedrekord) Kind() string { - return "hashedrekord" -} - -// SetKind sets the kind of this subtype -func (m *Hashedrekord) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Hashedrekord) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HashedrekordSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Hashedrekord - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Hashedrekord) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HashedrekordSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this hashedrekord -func (m *Hashedrekord) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Hashedrekord) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Hashedrekord) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this hashedrekord based on the context it is used -func (m *Hashedrekord) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Hashedrekord) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Hashedrekord) UnmarshalBinary(b []byte) error { - var res Hashedrekord - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go deleted file mode 100644 index 387a9392bb..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// HashedrekordSchema Rekor Schema -// -// # Schema for Rekord objects -// -// swagger:model hashedrekordSchema -type HashedrekordSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go deleted file mode 100644 index f8bf233ed7..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go +++ /dev/null @@ -1,513 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// HashedrekordV001Schema Hashed Rekor v0.0.1 Schema -// -// # Schema for Hashed Rekord object -// -// swagger:model hashedrekordV001Schema -type HashedrekordV001Schema struct { - - // data - // Required: true - Data *HashedrekordV001SchemaData `json:"data"` - - // signature - // Required: true - Signature *HashedrekordV001SchemaSignature `json:"signature"` -} - -// Validate validates this hashedrekord v001 schema -func (m *HashedrekordV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateData(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001Schema) validateData(formats strfmt.Registry) error { - - if err := validate.Required("data", "body", m.Data); err != nil { - return err - } - - if m.Data != nil { - if err := m.Data.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *HashedrekordV001Schema) validateSignature(formats strfmt.Registry) error { - - if err := validate.Required("signature", "body", m.Signature); err != nil { - return err - } - - if m.Signature != nil { - if err := m.Signature.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// ContextValidate validate this hashedrekord v001 schema based on the context it is used -func (m *HashedrekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateData(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSignature(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *HashedrekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { - - if m.Data != nil { - - if err := m.Data.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *HashedrekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { - - if m.Signature != nil { - - if err := m.Signature.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001Schema) UnmarshalBinary(b []byte) error { - var res HashedrekordV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HashedrekordV001SchemaData Information about the content associated with the entry -// -// swagger:model HashedrekordV001SchemaData -type HashedrekordV001SchemaData struct { - - // hash - Hash *HashedrekordV001SchemaDataHash `json:"hash,omitempty"` -} - -// Validate validates this hashedrekord v001 schema data -func (m *HashedrekordV001SchemaData) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001SchemaData) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this hashedrekord v001 schema data based on the context it is used -func (m *HashedrekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." 
+ "hash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001SchemaData) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001SchemaData) UnmarshalBinary(b []byte) error { - var res HashedrekordV001SchemaData - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HashedrekordV001SchemaDataHash Specifies the hash algorithm and value for the content -// -// swagger:model HashedrekordV001SchemaDataHash -type HashedrekordV001SchemaDataHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the content - // Required: true - Value *string `json:"value"` -} - -// Validate validates this hashedrekord v001 schema data hash -func (m *HashedrekordV001SchemaDataHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum = append(hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // HashedrekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256" - HashedrekordV001SchemaDataHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *HashedrekordV001SchemaDataHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *HashedrekordV001SchemaDataHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("data"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *HashedrekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this hashedrekord v001 schema data hash based on context it is used -func (m *HashedrekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001SchemaDataHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001SchemaDataHash) UnmarshalBinary(b []byte) error { - var res HashedrekordV001SchemaDataHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HashedrekordV001SchemaSignature Information about the detached signature associated with the entry -// -// swagger:model 
HashedrekordV001SchemaSignature -type HashedrekordV001SchemaSignature struct { - - // Specifies the content of the signature inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // public key - PublicKey *HashedrekordV001SchemaSignaturePublicKey `json:"publicKey,omitempty"` -} - -// Validate validates this hashedrekord v001 schema signature -func (m *HashedrekordV001SchemaSignature) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { - if swag.IsZero(m.PublicKey) { // not required - return nil - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." + "publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this hashedrekord v001 schema signature based on the context it is used -func (m *HashedrekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if swag.IsZero(m.PublicKey) { // not required - return nil - } - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." 
+ "publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001SchemaSignature) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001SchemaSignature) UnmarshalBinary(b []byte) error { - var res HashedrekordV001SchemaSignature - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HashedrekordV001SchemaSignaturePublicKey The public key that can verify the signature; this can also be an X509 code signing certificate that contains the raw public key information -// -// swagger:model HashedrekordV001SchemaSignaturePublicKey -type HashedrekordV001SchemaSignaturePublicKey struct { - - // Specifies the content of the public key or code signing certificate inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` -} - -// Validate validates this hashedrekord v001 schema signature public key -func (m *HashedrekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this hashedrekord v001 schema signature public key based on context it is used -func (m *HashedrekordV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { - var res HashedrekordV001SchemaSignaturePublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go deleted file mode 100644 index d19b8bc8c9..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Helm Helm chart -// -// swagger:model helm -type Helm struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HelmSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Helm) Kind() string { - return "helm" -} - -// SetKind sets the kind of this subtype -func (m *Helm) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Helm) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HelmSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Helm - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Helm) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HelmSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this helm -func (m *Helm) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Helm) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Helm) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this helm based on the context it is used -func (m *Helm) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Helm) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Helm) UnmarshalBinary(b []byte) error { - var res Helm - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go deleted file mode 100644 index 0ab87df9ce..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// HelmSchema Helm Schema -// -// # Schema for Helm objects -// -// swagger:model helmSchema -type HelmSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go deleted file mode 100644 index 930efc8786..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go +++ /dev/null @@ -1,662 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// HelmV001Schema Helm v0.0.1 Schema -// -// # Schema for Helm object -// -// swagger:model helmV001Schema -type HelmV001Schema struct { - - // chart - // Required: true - Chart *HelmV001SchemaChart `json:"chart"` - - // public key - // Required: true - PublicKey *HelmV001SchemaPublicKey `json:"publicKey"` -} - -// Validate validates this helm v001 schema -func (m *HelmV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateChart(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HelmV001Schema) validateChart(formats strfmt.Registry) error { - - if err := validate.Required("chart", "body", m.Chart); err != nil { - return err - } - - if m.Chart != nil { - if err := m.Chart.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart") - } - return err - } - } - - return nil -} - -func (m *HelmV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this helm v001 schema based on the context it is used -func (m *HelmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateChart(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *HelmV001Schema) contextValidateChart(ctx context.Context, formats strfmt.Registry) error { - - if m.Chart != nil { - - if err := m.Chart.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart") - } - return err - } - } - - return nil -} - -func (m *HelmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HelmV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HelmV001Schema) UnmarshalBinary(b []byte) error { - var res HelmV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HelmV001SchemaChart Information about the Helm chart associated with the entry -// -// swagger:model HelmV001SchemaChart -type HelmV001SchemaChart struct { - - // hash - Hash *HelmV001SchemaChartHash `json:"hash,omitempty"` - - // provenance - // Required: true - Provenance *HelmV001SchemaChartProvenance `json:"provenance"` -} - -// Validate validates this helm v001 schema chart -func (m *HelmV001SchemaChart) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateProvenance(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HelmV001SchemaChart) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *HelmV001SchemaChart) validateProvenance(formats strfmt.Registry) error { - - if err := validate.Required("chart"+"."+"provenance", "body", m.Provenance); err != nil { - return err - } - - if m.Provenance != nil { - if err := m.Provenance.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "provenance") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "provenance") - } - return err - } - } - - return nil -} - -// ContextValidate validate this helm v001 schema chart based on the context it is used -func (m *HelmV001SchemaChart) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateProvenance(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *HelmV001SchemaChart) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *HelmV001SchemaChart) contextValidateProvenance(ctx context.Context, formats strfmt.Registry) error { - - if m.Provenance != nil { - - if err := m.Provenance.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "provenance") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "provenance") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HelmV001SchemaChart) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HelmV001SchemaChart) UnmarshalBinary(b []byte) error { - var res HelmV001SchemaChart - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HelmV001SchemaChartHash Specifies the hash algorithm and value for the chart -// -// swagger:model HelmV001SchemaChartHash -type HelmV001SchemaChartHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the chart - // Required: true - Value *string `json:"value"` -} - -// Validate validates this helm v001 schema chart hash -func (m *HelmV001SchemaChartHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
-	}
-	return nil
-}
-
-var helmV001SchemaChartHashTypeAlgorithmPropEnum []interface{}
-
-func init() {
-	var res []string
-	if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
-		panic(err)
-	}
-	for _, v := range res {
-		helmV001SchemaChartHashTypeAlgorithmPropEnum = append(helmV001SchemaChartHashTypeAlgorithmPropEnum, v)
-	}
-}
-
-const (
-
-	// HelmV001SchemaChartHashAlgorithmSha256 captures enum value "sha256"
-	HelmV001SchemaChartHashAlgorithmSha256 string = "sha256"
-)
-
-// prop value enum
-func (m *HelmV001SchemaChartHash) validateAlgorithmEnum(path, location string, value string) error {
-	if err := validate.EnumCase(path, location, value, helmV001SchemaChartHashTypeAlgorithmPropEnum, true); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (m *HelmV001SchemaChartHash) validateAlgorithm(formats strfmt.Registry) error {
-
-	if err := validate.Required("chart"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
-		return err
-	}
-
-	// value enum
-	if err := m.validateAlgorithmEnum("chart"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *HelmV001SchemaChartHash) validateValue(formats strfmt.Registry) error {
-
-	if err := validate.Required("chart"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ContextValidate validate this helm v001 schema chart hash based on the context it is used
-func (m *HelmV001SchemaChartHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *HelmV001SchemaChartHash) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *HelmV001SchemaChartHash) UnmarshalBinary(b []byte) error {
-	var res HelmV001SchemaChartHash
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
-
-// HelmV001SchemaChartProvenance The provenance entry associated with the signed Helm Chart
-//
-// swagger:model HelmV001SchemaChartProvenance
-type HelmV001SchemaChartProvenance struct {
-
-	// Specifies the content of the provenance file inline within the document
-	// Format: byte
-	Content strfmt.Base64 `json:"content,omitempty"`
-
-	// signature
-	Signature *HelmV001SchemaChartProvenanceSignature `json:"signature,omitempty"`
-}
-
-// Validate validates this helm v001 schema chart provenance
-func (m *HelmV001SchemaChartProvenance) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateSignature(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *HelmV001SchemaChartProvenance) validateSignature(formats strfmt.Registry) error {
-	if swag.IsZero(m.Signature) { // not required
-		return nil
-	}
-
-	if m.Signature != nil {
-		if err := m.Signature.Validate(formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("chart" + "." + "provenance" + "." + "signature")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("chart" + "." + "provenance" + "." + "signature")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-// ContextValidate validate this helm v001 schema chart provenance based on the context it is used
-func (m *HelmV001SchemaChartProvenance) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.contextValidateSignature(ctx, formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *HelmV001SchemaChartProvenance) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error {
-
-	if m.Signature != nil {
-
-		if swag.IsZero(m.Signature) { // not required
-			return nil
-		}
-
-		if err := m.Signature.ContextValidate(ctx, formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("chart" + "." + "provenance" + "." + "signature")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("chart" + "." + "provenance" + "." + "signature")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *HelmV001SchemaChartProvenance) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *HelmV001SchemaChartProvenance) UnmarshalBinary(b []byte) error {
-	var res HelmV001SchemaChartProvenance
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
-
-// HelmV001SchemaChartProvenanceSignature Information about the included signature in the provenance file
-//
-// swagger:model HelmV001SchemaChartProvenanceSignature
-type HelmV001SchemaChartProvenanceSignature struct {
-
-	// Specifies the signature embedded within the provenance file
-	// Required: true
-	// Read Only: true
-	// Format: byte
-	Content strfmt.Base64 `json:"content"`
-}
-
-// Validate validates this helm v001 schema chart provenance signature
-func (m *HelmV001SchemaChartProvenanceSignature) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateContent(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *HelmV001SchemaChartProvenanceSignature) validateContent(formats strfmt.Registry) error {
-
-	if err := validate.Required("chart"+"."+"provenance"+"."+"signature"+"."+"content", "body", strfmt.Base64(m.Content)); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ContextValidate validate this helm v001 schema chart provenance signature based on the context it is used
-func (m *HelmV001SchemaChartProvenanceSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.contextValidateContent(ctx, formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *HelmV001SchemaChartProvenanceSignature) contextValidateContent(ctx context.Context, formats strfmt.Registry) error {
-
-	if err := validate.ReadOnly(ctx, "chart"+"."+"provenance"+"."+"signature"+"."+"content", "body", strfmt.Base64(m.Content)); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *HelmV001SchemaChartProvenanceSignature) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *HelmV001SchemaChartProvenanceSignature) UnmarshalBinary(b []byte) error {
-	var res HelmV001SchemaChartProvenanceSignature
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
-
-// HelmV001SchemaPublicKey The public key that can verify the package signature
-//
-// swagger:model HelmV001SchemaPublicKey
-type HelmV001SchemaPublicKey struct {
-
-	// Specifies the content of the public key inline within the document
-	// Required: true
-	// Format: byte
-	Content *strfmt.Base64 `json:"content"`
-}
-
-// Validate validates this helm v001 schema public key
-func (m *HelmV001SchemaPublicKey) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateContent(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *HelmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error {
-
-	if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ContextValidate validates this helm v001 schema public key based on context it is used
-func (m *HelmV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *HelmV001SchemaPublicKey) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *HelmV001SchemaPublicKey) UnmarshalBinary(b []byte) error {
-	var res HelmV001SchemaPublicKey
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go
deleted file mode 100644
index c555eb2da6..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Code generated by go-swagger; DO NOT EDIT.
-
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package models
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-import (
-	"context"
-
-	"github.com/go-openapi/errors"
-	"github.com/go-openapi/strfmt"
-	"github.com/go-openapi/swag"
-	"github.com/go-openapi/validate"
-)
-
-// InactiveShardLogInfo inactive shard log info
-//
-// swagger:model InactiveShardLogInfo
-type InactiveShardLogInfo struct {
-
-	// The current hash value stored at the root of the merkle tree
-	// Required: true
-	// Pattern: ^[0-9a-fA-F]{64}$
-	RootHash *string `json:"rootHash"`
-
-	// The current signed tree head
-	// Required: true
-	SignedTreeHead *string `json:"signedTreeHead"`
-
-	// The current treeID
-	// Required: true
-	// Pattern: ^[0-9]+$
-	TreeID *string `json:"treeID"`
-
-	// The current number of nodes in the merkle tree
-	// Required: true
-	// Minimum: 1
-	TreeSize *int64 `json:"treeSize"`
-}
-
-// Validate validates this inactive shard log info
-func (m *InactiveShardLogInfo) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateRootHash(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateSignedTreeHead(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateTreeID(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateTreeSize(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *InactiveShardLogInfo) validateRootHash(formats strfmt.Registry) error {
-
-	if err := validate.Required("rootHash", "body", m.RootHash); err != nil {
-		return err
-	}
-
-	if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *InactiveShardLogInfo) validateSignedTreeHead(formats strfmt.Registry) error {
-
-	if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *InactiveShardLogInfo) validateTreeID(formats strfmt.Registry) error {
-
-	if err := validate.Required("treeID", "body", m.TreeID); err != nil {
-		return err
-	}
-
-	if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *InactiveShardLogInfo) validateTreeSize(formats strfmt.Registry) error {
-
-	if err := validate.Required("treeSize", "body", m.TreeSize); err != nil {
-		return err
-	}
-
-	if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ContextValidate validates this inactive shard log info based on context it is used
-func (m *InactiveShardLogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *InactiveShardLogInfo) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *InactiveShardLogInfo) UnmarshalBinary(b []byte) error {
-	var res InactiveShardLogInfo
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go
deleted file mode 100644
index 86f0d7b94e..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Code generated by go-swagger; DO NOT EDIT.
-
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package models
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-import (
-	"context"
-	"strconv"
-
-	"github.com/go-openapi/errors"
-	"github.com/go-openapi/strfmt"
-	"github.com/go-openapi/swag"
-	"github.com/go-openapi/validate"
-)
-
-// InclusionProof inclusion proof
-//
-// swagger:model InclusionProof
-type InclusionProof struct {
-
-	// The checkpoint (signed tree head) that the inclusion proof is based on
-	// Required: true
-	Checkpoint *string `json:"checkpoint"`
-
-	// A list of hashes required to compute the inclusion proof, sorted in order from leaf to root
-	// Required: true
-	Hashes []string `json:"hashes"`
-
-	// The index of the entry in the transparency log
-	// Required: true
-	// Minimum: 0
-	LogIndex *int64 `json:"logIndex"`
-
-	// The hash value stored at the root of the merkle tree at the time the proof was generated
-	// Required: true
-	// Pattern: ^[0-9a-fA-F]{64}$
-	RootHash *string `json:"rootHash"`
-
-	// The size of the merkle tree at the time the inclusion proof was generated
-	// Required: true
-	// Minimum: 1
-	TreeSize *int64 `json:"treeSize"`
-}
-
-// Validate validates this inclusion proof
-func (m *InclusionProof) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateCheckpoint(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateHashes(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateLogIndex(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateRootHash(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateTreeSize(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *InclusionProof) validateCheckpoint(formats strfmt.Registry) error {
-
-	if err := validate.Required("checkpoint", "body", m.Checkpoint); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *InclusionProof) validateHashes(formats strfmt.Registry) error {
-
-	if err := validate.Required("hashes", "body", m.Hashes); err != nil {
-		return err
-	}
-
-	for i := 0; i < len(m.Hashes); i++ {
-
-		if err := validate.Pattern("hashes"+"."+strconv.Itoa(i), "body", m.Hashes[i], `^[0-9a-fA-F]{64}$`); err != nil {
-			return err
-		}
-
-	}
-
-	return nil
-}
-
-func (m *InclusionProof) validateLogIndex(formats strfmt.Registry) error {
-
-	if err := validate.Required("logIndex", "body", m.LogIndex); err != nil {
-		return err
-	}
-
-	if err := validate.MinimumInt("logIndex", "body", *m.LogIndex, 0, false); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *InclusionProof) validateRootHash(formats strfmt.Registry) error {
-
-	if err := validate.Required("rootHash", "body", m.RootHash); err != nil {
-		return err
-	}
-
-	if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *InclusionProof) validateTreeSize(formats strfmt.Registry) error {
-
-	if err := validate.Required("treeSize", "body", m.TreeSize); err != nil {
-		return err
-	}
-
-	if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ContextValidate validates this inclusion proof based on context it is used
-func (m *InclusionProof) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *InclusionProof) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *InclusionProof) UnmarshalBinary(b []byte) error {
-	var res InclusionProof
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go
deleted file mode 100644
index 4f208de1d5..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Code generated by go-swagger; DO NOT EDIT.
-
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package models
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-
-	"github.com/go-openapi/errors"
-	"github.com/go-openapi/strfmt"
-	"github.com/go-openapi/swag"
-	"github.com/go-openapi/validate"
-)
-
-// Intoto Intoto object
-//
-// swagger:model intoto
-type Intoto struct {
-
-	// api version
-	// Required: true
-	// Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
-	APIVersion *string `json:"apiVersion"`
-
-	// spec
-	// Required: true
-	Spec IntotoSchema `json:"spec"`
-}
-
-// Kind gets the kind of this subtype
-func (m *Intoto) Kind() string {
-	return "intoto"
-}
-
-// SetKind sets the kind of this subtype
-func (m *Intoto) SetKind(val string) {
-}
-
-// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
-func (m *Intoto) UnmarshalJSON(raw []byte) error {
-	var data struct {
-
-		// api version
-		// Required: true
-		// Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
-		APIVersion *string `json:"apiVersion"`
-
-		// spec
-		// Required: true
-		Spec IntotoSchema `json:"spec"`
-	}
-	buf := bytes.NewBuffer(raw)
-	dec := json.NewDecoder(buf)
-	dec.UseNumber()
-
-	if err := dec.Decode(&data); err != nil {
-		return err
-	}
-
-	var base struct {
-		/* Just the base type fields. Used for unmarshalling polymorphic types.*/
-
-		Kind string `json:"kind"`
-	}
-	buf = bytes.NewBuffer(raw)
-	dec = json.NewDecoder(buf)
-	dec.UseNumber()
-
-	if err := dec.Decode(&base); err != nil {
-		return err
-	}
-
-	var result Intoto
-
-	if base.Kind != result.Kind() {
-		/* Not the type we're looking for. */
-		return errors.New(422, "invalid kind value: %q", base.Kind)
-	}
-
-	result.APIVersion = data.APIVersion
-	result.Spec = data.Spec
-
-	*m = result
-
-	return nil
-}
-
-// MarshalJSON marshals this object with a polymorphic type to a JSON structure
-func (m Intoto) MarshalJSON() ([]byte, error) {
-	var b1, b2, b3 []byte
-	var err error
-	b1, err = json.Marshal(struct {
-
-		// api version
-		// Required: true
-		// Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
-		APIVersion *string `json:"apiVersion"`
-
-		// spec
-		// Required: true
-		Spec IntotoSchema `json:"spec"`
-	}{
-
-		APIVersion: m.APIVersion,
-
-		Spec: m.Spec,
-	})
-	if err != nil {
-		return nil, err
-	}
-	b2, err = json.Marshal(struct {
-		Kind string `json:"kind"`
-	}{
-
-		Kind: m.Kind(),
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	return swag.ConcatJSON(b1, b2, b3), nil
-}
-
-// Validate validates this intoto
-func (m *Intoto) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateAPIVersion(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateSpec(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *Intoto) validateAPIVersion(formats strfmt.Registry) error {
-
-	if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
-		return err
-	}
-
-	if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *Intoto) validateSpec(formats strfmt.Registry) error {
-
-	if m.Spec == nil {
-		return errors.Required("spec", "body", nil)
-	}
-
-	return nil
-}
-
-// ContextValidate validate this intoto based on the context it is used
-func (m *Intoto) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *Intoto) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *Intoto) UnmarshalBinary(b []byte) error {
-	var res Intoto
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go
deleted file mode 100644
index a7fdaa6a6d..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Code generated by go-swagger; DO NOT EDIT.
-
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package models
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// IntotoSchema Intoto Schema
-//
-// # Intoto for Rekord objects
-//
-// swagger:model intotoSchema
-type IntotoSchema interface{}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go
deleted file mode 100644
index 0c299b1ca5..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go
+++ /dev/null
@@ -1,514 +0,0 @@
-// Code generated by go-swagger; DO NOT EDIT.
-
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package models
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-import (
-	"context"
-	"encoding/json"
-
-	"github.com/go-openapi/errors"
-	"github.com/go-openapi/strfmt"
-	"github.com/go-openapi/swag"
-	"github.com/go-openapi/validate"
-)
-
-// IntotoV001Schema intoto v0.0.1 Schema
-//
-// # Schema for intoto object
-//
-// swagger:model intotoV001Schema
-type IntotoV001Schema struct {
-
-	// content
-	// Required: true
-	Content *IntotoV001SchemaContent `json:"content"`
-
-	// The public key that can verify the signature
-	// Required: true
-	// Format: byte
-	PublicKey *strfmt.Base64 `json:"publicKey"`
-}
-
-// Validate validates this intoto v001 schema
-func (m *IntotoV001Schema) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateContent(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validatePublicKey(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *IntotoV001Schema) validateContent(formats strfmt.Registry) error {
-
-	if err := validate.Required("content", "body", m.Content); err != nil {
-		return err
-	}
-
-	if m.Content != nil {
-		if err := m.Content.Validate(formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (m *IntotoV001Schema) validatePublicKey(formats strfmt.Registry) error {
-
-	if err := validate.Required("publicKey", "body", m.PublicKey); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ContextValidate validate this intoto v001 schema based on the context it is used
-func (m *IntotoV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.contextValidateContent(ctx, formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *IntotoV001Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error {
-
-	if m.Content != nil {
-
-		if err := m.Content.ContextValidate(ctx, formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV001Schema) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV001Schema) UnmarshalBinary(b []byte) error {
-	var res IntotoV001Schema
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
-
-// IntotoV001SchemaContent intoto v001 schema content
-//
-// swagger:model IntotoV001SchemaContent
-type IntotoV001SchemaContent struct {
-
-	// envelope
-	Envelope string `json:"envelope,omitempty"`
-
-	// hash
-	Hash *IntotoV001SchemaContentHash `json:"hash,omitempty"`
-
-	// payload hash
-	PayloadHash *IntotoV001SchemaContentPayloadHash `json:"payloadHash,omitempty"`
-}
-
-// Validate validates this intoto v001 schema content
-func (m *IntotoV001SchemaContent) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateHash(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validatePayloadHash(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *IntotoV001SchemaContent) validateHash(formats strfmt.Registry) error {
-	if swag.IsZero(m.Hash) { // not required
-		return nil
-	}
-
-	if m.Hash != nil {
-		if err := m.Hash.Validate(formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content" + "." + "hash")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content" + "." + "hash")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (m *IntotoV001SchemaContent) validatePayloadHash(formats strfmt.Registry) error {
-	if swag.IsZero(m.PayloadHash) { // not required
-		return nil
-	}
-
-	if m.PayloadHash != nil {
-		if err := m.PayloadHash.Validate(formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content" + "." + "payloadHash")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content" + "." + "payloadHash")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-// ContextValidate validate this intoto v001 schema content based on the context it is used
-func (m *IntotoV001SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.contextValidateHash(ctx, formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.contextValidatePayloadHash(ctx, formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *IntotoV001SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
-
-	if m.Hash != nil {
-
-		if swag.IsZero(m.Hash) { // not required
-			return nil
-		}
-
-		if err := m.Hash.ContextValidate(ctx, formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content" + "." + "hash")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content" + "." + "hash")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (m *IntotoV001SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
-
-	if m.PayloadHash != nil {
-
-		if swag.IsZero(m.PayloadHash) { // not required
-			return nil
-		}
-
-		if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content" + "." + "payloadHash")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content" + "." + "payloadHash")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV001SchemaContent) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV001SchemaContent) UnmarshalBinary(b []byte) error {
-	var res IntotoV001SchemaContent
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
-
-// IntotoV001SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope; this is computed by the rekor server, client-provided values are ignored
-//
-// swagger:model IntotoV001SchemaContentHash
-type IntotoV001SchemaContentHash struct {
-
-	// The hashing function used to compute the hash value
-	// Required: true
-	// Enum: [sha256]
-	Algorithm *string `json:"algorithm"`
-
-	// The hash value for the archive
-	// Required: true
-	Value *string `json:"value"`
-}
-
-// Validate validates this intoto v001 schema content hash
-func (m *IntotoV001SchemaContentHash) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateAlgorithm(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateValue(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-var intotoV001SchemaContentHashTypeAlgorithmPropEnum []interface{}
-
-func init() {
-	var res []string
-	if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
-		panic(err)
-	}
-	for _, v := range res {
-		intotoV001SchemaContentHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentHashTypeAlgorithmPropEnum, v)
-	}
-}
-
-const (
-
-	// IntotoV001SchemaContentHashAlgorithmSha256 captures enum value "sha256"
-	IntotoV001SchemaContentHashAlgorithmSha256 string = "sha256"
-)
-
-// prop value enum
-func (m *IntotoV001SchemaContentHash) validateAlgorithmEnum(path, location string, value string) error {
-	if err := validate.EnumCase(path, location, value, intotoV001SchemaContentHashTypeAlgorithmPropEnum, true); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (m *IntotoV001SchemaContentHash) validateAlgorithm(formats strfmt.Registry) error {
-
-	if err := validate.Required("content"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
-		return err
-	}
-
-	// value enum
-	if err := m.validateAlgorithmEnum("content"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *IntotoV001SchemaContentHash) validateValue(formats strfmt.Registry) error {
-
-	if err := validate.Required("content"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ContextValidate validate this intoto v001 schema content hash based on the context it is used
-func (m *IntotoV001SchemaContentHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV001SchemaContentHash) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV001SchemaContentHash) UnmarshalBinary(b []byte) error {
-	var res IntotoV001SchemaContentHash
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
-
-// IntotoV001SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope; this is computed by the rekor server, client-provided values are ignored
-//
-// swagger:model IntotoV001SchemaContentPayloadHash
-type IntotoV001SchemaContentPayloadHash struct {
-
-	// The hashing function used to compute the hash value
-	// Required: true
-	// Enum: [sha256]
-	Algorithm *string `json:"algorithm"`
-
-	// The hash value for the envelope's payload
-	// Required: true
-	Value *string `json:"value"`
-}
-
-// Validate validates this intoto v001 schema content payload hash
-func (m *IntotoV001SchemaContentPayloadHash) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateAlgorithm(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateValue(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-var intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum []interface{}
-
-func init() {
-	var res []string
-	if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
-		panic(err)
-	}
-	for _, v := range res {
-		intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, v)
-	}
-}
-
-const (
-
-	// IntotoV001SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256"
-	IntotoV001SchemaContentPayloadHashAlgorithmSha256 string = "sha256"
-)
-
-// prop value enum
-func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error {
-	if err := validate.EnumCase(path, location, value, intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error {
-
-	if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil {
-		return err
-	}
-
-	// value enum
-	if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *IntotoV001SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error {
-
-	if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ContextValidate validate this intoto v001 schema content payload hash based on the context it is used
-func (m *IntotoV001SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV001SchemaContentPayloadHash) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV001SchemaContentPayloadHash) UnmarshalBinary(b []byte) error {
-	var res IntotoV001SchemaContentPayloadHash
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
deleted file mode 100644
index c2c08ea546..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
+++ /dev/null
@@ -1,757 +0,0 @@
-// Code generated by go-swagger; DO NOT EDIT.
-
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package models
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-import (
-	"context"
-	"encoding/json"
-	"strconv"
-
-	"github.com/go-openapi/errors"
-	"github.com/go-openapi/strfmt"
-	"github.com/go-openapi/swag"
-	"github.com/go-openapi/validate"
-)
-
-// IntotoV002Schema intoto v0.0.2 Schema
-//
-// # Schema for intoto object
-//
-// swagger:model intotoV002Schema
-type IntotoV002Schema struct {
-
-	// content
-	// Required: true
-	Content *IntotoV002SchemaContent `json:"content"`
-}
-
-// Validate validates this intoto v002 schema
-func (m *IntotoV002Schema) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateContent(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *IntotoV002Schema) validateContent(formats strfmt.Registry) error {
-
-	if err := validate.Required("content", "body", m.Content); err != nil {
-		return err
-	}
-
-	if m.Content != nil {
-		if err := m.Content.Validate(formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-// ContextValidate validate this intoto v002 schema based on the context it is used
-func (m *IntotoV002Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.contextValidateContent(ctx, formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *IntotoV002Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error {
-
-	if m.Content != nil {
-
-		if err := m.Content.ContextValidate(ctx, formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV002Schema) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV002Schema) UnmarshalBinary(b []byte) error {
-	var res IntotoV002Schema
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
-
-// IntotoV002SchemaContent intoto v002 schema content
-//
-// swagger:model IntotoV002SchemaContent
-type IntotoV002SchemaContent struct {
-
-	// envelope
-	// Required: true
-	Envelope *IntotoV002SchemaContentEnvelope `json:"envelope"`
-
-	// hash
-	Hash *IntotoV002SchemaContentHash `json:"hash,omitempty"`
-
-	// payload hash
-	PayloadHash *IntotoV002SchemaContentPayloadHash `json:"payloadHash,omitempty"`
-}
-
-// Validate validates this intoto v002 schema content
-func (m *IntotoV002SchemaContent) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateEnvelope(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateHash(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validatePayloadHash(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *IntotoV002SchemaContent) validateEnvelope(formats strfmt.Registry) error {
-
-	if err := validate.Required("content"+"."+"envelope", "body", m.Envelope); err != nil {
-		return err
-	}
-
-	if m.Envelope != nil {
-		if err := m.Envelope.Validate(formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content" + "." + "envelope")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content" + "." + "envelope")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (m *IntotoV002SchemaContent) validateHash(formats strfmt.Registry) error {
-	if swag.IsZero(m.Hash) { // not required
-		return nil
-	}
-
-	if m.Hash != nil {
-		if err := m.Hash.Validate(formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content" + "." + "hash")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content" + "." + "hash")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (m *IntotoV002SchemaContent) validatePayloadHash(formats strfmt.Registry) error {
-	if swag.IsZero(m.PayloadHash) { // not required
-		return nil
-	}
-
-	if m.PayloadHash != nil {
-		if err := m.PayloadHash.Validate(formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content" + "." + "payloadHash")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content" + "." + "payloadHash")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-// ContextValidate validate this intoto v002 schema content based on the context it is used
-func (m *IntotoV002SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.contextValidateEnvelope(ctx, formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.contextValidateHash(ctx, formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.contextValidatePayloadHash(ctx, formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *IntotoV002SchemaContent) contextValidateEnvelope(ctx context.Context, formats strfmt.Registry) error {
-
-	if m.Envelope != nil {
-
-		if err := m.Envelope.ContextValidate(ctx, formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content" + "." + "envelope")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content" + "." + "envelope")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (m *IntotoV002SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
-
-	if m.Hash != nil {
-
-		if swag.IsZero(m.Hash) { // not required
-			return nil
-		}
-
-		if err := m.Hash.ContextValidate(ctx, formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content" + "." + "hash")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content" + "." + "hash")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (m *IntotoV002SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
-
-	if m.PayloadHash != nil {
-
-		if swag.IsZero(m.PayloadHash) { // not required
-			return nil
-		}
-
-		if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
-			if ve, ok := err.(*errors.Validation); ok {
-				return ve.ValidateName("content" + "." + "payloadHash")
-			} else if ce, ok := err.(*errors.CompositeError); ok {
-				return ce.ValidateName("content" + "." + "payloadHash")
-			}
-			return err
-		}
-	}
-
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV002SchemaContent) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV002SchemaContent) UnmarshalBinary(b []byte) error {
-	var res IntotoV002SchemaContent
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
-
-// IntotoV002SchemaContentEnvelope dsse envelope
-//
-// swagger:model IntotoV002SchemaContentEnvelope
-type IntotoV002SchemaContentEnvelope struct {
-
-	// payload of the envelope
-	// Format: byte
-	Payload strfmt.Base64 `json:"payload,omitempty"`
-
-	// type describing the payload
-	// Required: true
-	PayloadType *string `json:"payloadType"`
-
-	// collection of all signatures of the envelope's payload
-	// Required: true
-	// Min Items: 1
-	Signatures []*IntotoV002SchemaContentEnvelopeSignaturesItems0 `json:"signatures"`
-}
-
-// Validate validates this intoto v002 schema content envelope
-func (m *IntotoV002SchemaContentEnvelope) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validatePayloadType(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateSignatures(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *IntotoV002SchemaContentEnvelope) validatePayloadType(formats strfmt.Registry) error {
-
-	if err := validate.Required("content"+"."+"envelope"+"."+"payloadType", "body", m.PayloadType); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *IntotoV002SchemaContentEnvelope) validateSignatures(formats strfmt.Registry) error {
-
-	if err := validate.Required("content"+"."+"envelope"+"."+"signatures", "body", m.Signatures); err != nil {
-		return err
-	}
-
-	iSignaturesSize := int64(len(m.Signatures))
-
-	if err := validate.MinItems("content"+"."+"envelope"+"."+"signatures", "body", iSignaturesSize, 1); err != nil {
-		return err
-	}
-
-	for i := 0; i < len(m.Signatures); i++ {
-		if swag.IsZero(m.Signatures[i]) { // not required
-			continue
-		}
-
-		if m.Signatures[i] != nil {
-			if err := m.Signatures[i].Validate(formats); err != nil {
-				if ve, ok := err.(*errors.Validation); ok {
-					return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i))
-				} else if ce, ok := err.(*errors.CompositeError); ok {
-					return ce.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i))
-				}
-				return err
-			}
-		}
-
-	}
-
-	return nil
-}
-
-// ContextValidate validate this intoto v002 schema content envelope based on the context it is used
-func (m *IntotoV002SchemaContentEnvelope) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.contextValidateSignatures(ctx, formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *IntotoV002SchemaContentEnvelope) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error {
-
-	for i := 0; i < len(m.Signatures); i++ {
-
-		if m.Signatures[i] != nil {
-
-			if swag.IsZero(m.Signatures[i]) { // not required
-				return nil
-			}
-
-			if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil {
-				if ve, ok := err.(*errors.Validation); ok {
-					return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i))
-				} else if ce, ok := err.(*errors.CompositeError); ok {
-					return ce.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i))
-				}
-				return err
-			}
-		}
-
-	}
-
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV002SchemaContentEnvelope) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV002SchemaContentEnvelope) UnmarshalBinary(b []byte) error {
-	var res IntotoV002SchemaContentEnvelope
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
-
-// IntotoV002SchemaContentEnvelopeSignaturesItems0 a signature of the envelope's payload along with the public key for the signature
-//
-// swagger:model IntotoV002SchemaContentEnvelopeSignaturesItems0
-type IntotoV002SchemaContentEnvelopeSignaturesItems0 struct {
-
-	// optional id of the key used to create the signature
-	Keyid string `json:"keyid,omitempty"`
-
-	// public key that corresponds to this signature
-	// Required: true
-	// Format: byte
-	PublicKey *strfmt.Base64 `json:"publicKey"`
-
-	// signature of the payload
-	// Required: true
-	// Format: byte
-	Sig *strfmt.Base64 `json:"sig"`
-}
-
-// Validate validates this intoto v002 schema content envelope signatures items0
-func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validatePublicKey(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateSig(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validatePublicKey(formats strfmt.Registry) error {
-
-	if err := validate.Required("publicKey", "body", m.PublicKey); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validateSig(formats strfmt.Registry) error {
-
-	if err := validate.Required("sig", "body", m.Sig); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ContextValidate validates this intoto v002 schema content envelope signatures items0 based on context it is used
-func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) UnmarshalBinary(b []byte) error {
-	var res IntotoV002SchemaContentEnvelopeSignaturesItems0
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
-
-// IntotoV002SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope
-//
-// swagger:model IntotoV002SchemaContentHash
-type IntotoV002SchemaContentHash struct {
-
-	// The hashing function used to compute the hash value
-	// Required: true
-	// Enum: [sha256]
-	Algorithm *string `json:"algorithm"`
-
-	// The hash value for the archive
-	// Required: true
-	Value *string `json:"value"`
-}
-
-// Validate validates this intoto v002 schema content hash
-func (m *IntotoV002SchemaContentHash) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateAlgorithm(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateValue(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-var intotoV002SchemaContentHashTypeAlgorithmPropEnum []interface{}
-
-func init() {
-	var res []string
-	if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
-		panic(err)
-	}
-	for _, v := range res {
-		intotoV002SchemaContentHashTypeAlgorithmPropEnum = append(intotoV002SchemaContentHashTypeAlgorithmPropEnum, v)
-	}
-}
-
-const (
-
-	// IntotoV002SchemaContentHashAlgorithmSha256 captures enum value "sha256"
-	IntotoV002SchemaContentHashAlgorithmSha256 string = "sha256"
-)
-
-// prop value enum
-func (m *IntotoV002SchemaContentHash) validateAlgorithmEnum(path, location string, value string) error {
-	if err := validate.EnumCase(path, location, value, intotoV002SchemaContentHashTypeAlgorithmPropEnum, true); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (m *IntotoV002SchemaContentHash) validateAlgorithm(formats strfmt.Registry) error {
-
-	if err := validate.Required("content"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
-		return err
-	}
-
-	// value enum
-	if err := m.validateAlgorithmEnum("content"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *IntotoV002SchemaContentHash) validateValue(formats strfmt.Registry) error {
-
-	if err := validate.Required("content"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ContextValidate validate this intoto v002 schema content hash based on the context it is used
-func (m *IntotoV002SchemaContentHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV002SchemaContentHash) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV002SchemaContentHash) UnmarshalBinary(b []byte) error {
-	var res IntotoV002SchemaContentHash
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
-
-// IntotoV002SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope
-//
-// swagger:model IntotoV002SchemaContentPayloadHash
-type IntotoV002SchemaContentPayloadHash struct {
-
-	// The hashing function used to compute the hash value
-	// Required: true
-	// Enum: [sha256]
-	Algorithm *string `json:"algorithm"`
-
-	// The hash value of the payload
-	// Required: true
-	Value *string `json:"value"`
-}
-
-// Validate validates this intoto v002 schema content payload hash
-func (m *IntotoV002SchemaContentPayloadHash) Validate(formats strfmt.Registry) error {
-	var res []error
-
-	if err := m.validateAlgorithm(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if err := m.validateValue(formats); err != nil {
-		res = append(res, err)
-	}
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-var intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum []interface{}
-
-func init() {
-	var res []string
-	if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
-		panic(err)
-	}
-	for _, v := range res {
-		intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum, v)
-	}
-}
-
-const (
-
-	// IntotoV002SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256"
-	IntotoV002SchemaContentPayloadHashAlgorithmSha256 string = "sha256"
-)
-
-// prop value enum
-func (m *IntotoV002SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error {
-	if err := validate.EnumCase(path, location, value, intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (m *IntotoV002SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error {
-
-	if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil {
-		return err
-	}
-
-	// value enum
-	if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *IntotoV002SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error {
-
-	if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ContextValidate validate this intoto v002 schema content payload hash based on the context it is used
-func (m *IntotoV002SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
-	var res []error
-
-	if len(res) > 0 {
-		return errors.CompositeValidationError(res...)
-	}
-	return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV002SchemaContentPayloadHash) MarshalBinary() ([]byte, error) {
-	if m == nil {
-		return nil, nil
-	}
-	return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV002SchemaContentPayloadHash) UnmarshalBinary(b []byte) error {
-	var res IntotoV002SchemaContentPayloadHash
-	if err := swag.ReadJSON(b, &res); err != nil {
-		return err
-	}
-	*m = res
-	return nil
-}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go
deleted file mode 100644
index 3df3d21b8a..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Code generated by go-swagger; DO NOT EDIT.
-
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package models

-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Jar Java Archive (JAR) -// -// swagger:model jar -type Jar struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec JarSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Jar) Kind() string { - return "jar" -} - -// SetKind sets the kind of this subtype -func (m *Jar) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Jar) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec JarSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmarshalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Jar - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Jar) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec JarSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this jar -func (m *Jar) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...)
- } - return nil -} - -func (m *Jar) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Jar) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this jar based on the context it is used -func (m *Jar) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Jar) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Jar) UnmarshalBinary(b []byte) error { - var res Jar - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go deleted file mode 100644 index e7b9a590ed..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// JarSchema JAR Schema -// -// # Schema for JAR objects -// -// swagger:model jarSchema -type JarSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go deleted file mode 100644 index 4564964a5a..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go +++ /dev/null @@ -1,569 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// JarV001Schema JAR v0.0.1 Schema -// -// # Schema for JAR entries -// -// swagger:model jarV001Schema -type JarV001Schema struct { - - // archive - // Required: true - Archive *JarV001SchemaArchive `json:"archive"` - - // signature - Signature *JarV001SchemaSignature `json:"signature,omitempty"` -} - -// Validate validates this jar v001 schema -func (m *JarV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateArchive(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001Schema) validateArchive(formats strfmt.Registry) error { - - if err := validate.Required("archive", "body", m.Archive); err != nil { - return err - } - - if m.Archive != nil { - if err := m.Archive.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("archive") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("archive") - } - return err - } - } - - return nil -} - -func (m *JarV001Schema) validateSignature(formats strfmt.Registry) error { - if swag.IsZero(m.Signature) { // not required - return nil - } - - if m.Signature != nil { - if err := m.Signature.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// ContextValidate validate this jar v001 schema based on the context it is used -func (m *JarV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateArchive(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSignature(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *JarV001Schema) contextValidateArchive(ctx context.Context, formats strfmt.Registry) error { - - if m.Archive != nil { - - if err := m.Archive.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("archive") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("archive") - } - return err - } - } - - return nil -} - -func (m *JarV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { - - if m.Signature != nil { - - if swag.IsZero(m.Signature) { // not required - return nil - } - - if err := m.Signature.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001Schema) UnmarshalBinary(b []byte) error { - var res JarV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// JarV001SchemaArchive Information about the archive associated with the entry -// -// swagger:model JarV001SchemaArchive -type JarV001SchemaArchive struct { - - // Specifies the archive inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // hash - Hash *JarV001SchemaArchiveHash `json:"hash,omitempty"` -} - -// Validate validates this jar v001 schema archive -func (m *JarV001SchemaArchive) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaArchive) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("archive" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("archive" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this jar v001 schema archive based on the context it is used -func (m *JarV001SchemaArchive) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaArchive) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("archive" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("archive" + "." 
+ "hash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001SchemaArchive) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001SchemaArchive) UnmarshalBinary(b []byte) error { - var res JarV001SchemaArchive - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// JarV001SchemaArchiveHash Specifies the hash algorithm and value encompassing the entire signed archive -// -// swagger:model JarV001SchemaArchiveHash -type JarV001SchemaArchiveHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the archive - // Required: true - Value *string `json:"value"` -} - -// Validate validates this jar v001 schema archive hash -func (m *JarV001SchemaArchiveHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var jarV001SchemaArchiveHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - jarV001SchemaArchiveHashTypeAlgorithmPropEnum = append(jarV001SchemaArchiveHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // JarV001SchemaArchiveHashAlgorithmSha256 captures enum value "sha256" - JarV001SchemaArchiveHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *JarV001SchemaArchiveHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, jarV001SchemaArchiveHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *JarV001SchemaArchiveHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("archive"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("archive"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *JarV001SchemaArchiveHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("archive"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this jar v001 schema archive hash based on context it is used -func (m *JarV001SchemaArchiveHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001SchemaArchiveHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001SchemaArchiveHash) UnmarshalBinary(b []byte) error { - var res JarV001SchemaArchiveHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// JarV001SchemaSignature Information about the included signature in the JAR file -// -// swagger:model JarV001SchemaSignature -type JarV001SchemaSignature struct { - - // Specifies the PKCS7 signature embedded within the JAR file - // Required: 
true - // Read Only: true - // Format: byte - Content strfmt.Base64 `json:"content"` - - // public key - // Required: true - PublicKey *JarV001SchemaSignaturePublicKey `json:"publicKey"` -} - -// Validate validates this jar v001 schema signature -func (m *JarV001SchemaSignature) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaSignature) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"content", "body", strfmt.Base64(m.Content)); err != nil { - return err - } - - return nil -} - -func (m *JarV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." + "publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this jar v001 schema signature based on the context it is used -func (m *JarV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateContent(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaSignature) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { - - if err := validate.ReadOnly(ctx, "signature"+"."+"content", "body", strfmt.Base64(m.Content)); err != nil { - return err - } - - return nil -} - -func (m *JarV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." 
+ "publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001SchemaSignature) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001SchemaSignature) UnmarshalBinary(b []byte) error { - var res JarV001SchemaSignature - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// JarV001SchemaSignaturePublicKey The X509 certificate containing the public key JAR which verifies the signature of the JAR -// -// swagger:model JarV001SchemaSignaturePublicKey -type JarV001SchemaSignaturePublicKey struct { - - // Specifies the content of the X509 certificate containing the public key used to verify the signature - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this jar v001 schema signature public key -func (m *JarV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this jar v001 schema signature public key based on the context it is used -func (m *JarV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { - var res JarV001SchemaSignaturePublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go deleted file mode 100644 index ee32ded414..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go +++ /dev/null @@ -1,445 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// LogEntry log entry -// -// swagger:model LogEntry -type LogEntry map[string]LogEntryAnon - -// Validate validates this log entry -func (m LogEntry) Validate(formats strfmt.Registry) error { - var res []error - - for k := range m { - - if swag.IsZero(m[k]) { // not required - continue - } - if val, ok := m[k]; ok { - if err := val.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName(k) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName(k) - } - return err - } - } - - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// ContextValidate validate this log entry based on the context it is used -func (m LogEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - for k := range m { - - if val, ok := m[k]; ok { - if err := val.ContextValidate(ctx, formats); err != nil { - return err - } - } - - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// LogEntryAnon log entry anon -// -// swagger:model LogEntryAnon -type LogEntryAnon struct { - - // attestation - Attestation *LogEntryAnonAttestation `json:"attestation,omitempty"` - - // body - // Required: true - Body interface{} `json:"body"` - - // The time the entry was added to the log as a Unix timestamp in seconds - // Required: true - IntegratedTime *int64 `json:"integratedTime"` - - // This is the SHA256 hash of the DER-encoded public key for the log at the time the entry was included in the log - // Required: true - // Pattern: ^[0-9a-fA-F]{64}$ - LogID *string `json:"logID"` - - // log index - // Required: true - // Minimum: 0 - LogIndex *int64 `json:"logIndex"` - - // verification - Verification *LogEntryAnonVerification `json:"verification,omitempty"` -} - -// Validate validates this log entry anon -func (m *LogEntryAnon) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAttestation(formats); err != nil { - res = append(res, err) - } - - if err := m.validateBody(formats); err != nil { - res = append(res, err) - } - - if err := m.validateIntegratedTime(formats); err != nil { - res = append(res, err) - } - - if err := m.validateLogID(formats); err != nil { - res = append(res, err) - } - - if err := m.validateLogIndex(formats); err != nil { - res = append(res, err) - } - - if err := m.validateVerification(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *LogEntryAnon) validateAttestation(formats strfmt.Registry) error { - if swag.IsZero(m.Attestation) { // not required - return nil - } - - if m.Attestation != nil { - if err := m.Attestation.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("attestation") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("attestation") - } - return err - } - } - - return nil -} - -func (m *LogEntryAnon) validateBody(formats strfmt.Registry) error { - - if m.Body == nil { - return errors.Required("body", "body", nil) - } - - return nil -} - -func (m *LogEntryAnon) validateIntegratedTime(formats strfmt.Registry) error { - - if err := validate.Required("integratedTime", "body", m.IntegratedTime); err != nil { - return err - } - - return nil -} - -func (m *LogEntryAnon) validateLogID(formats strfmt.Registry) error { - - if err := validate.Required("logID", "body", m.LogID); err != nil { - return err - } - - if err := validate.Pattern("logID", "body", *m.LogID, `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - return nil -} - -func (m *LogEntryAnon) validateLogIndex(formats strfmt.Registry) error { - - if err := validate.Required("logIndex", "body", m.LogIndex); err != nil { - return err - } - - if err := validate.MinimumInt("logIndex", "body", *m.LogIndex, 0, false); err != nil { - return err - } - - return nil -} - -func (m *LogEntryAnon) validateVerification(formats strfmt.Registry) error { - if swag.IsZero(m.Verification) { // not required - return nil - } - - if m.Verification != nil { - if err := m.Verification.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("verification") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("verification") - } - return err - } - } - - return nil -} - -// ContextValidate validate this log entry anon based on the context it is used -func (m *LogEntryAnon) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateAttestation(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateVerification(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *LogEntryAnon) contextValidateAttestation(ctx context.Context, formats strfmt.Registry) error { - - if m.Attestation != nil { - - if swag.IsZero(m.Attestation) { // not required - return nil - } - - if err := m.Attestation.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("attestation") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("attestation") - } - return err - } - } - - return nil -} - -func (m *LogEntryAnon) contextValidateVerification(ctx context.Context, formats strfmt.Registry) error { - - if m.Verification != nil { - - if swag.IsZero(m.Verification) { // not required - return nil - } - - if err := m.Verification.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("verification") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("verification") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *LogEntryAnon) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LogEntryAnon) UnmarshalBinary(b []byte) error { - var res LogEntryAnon - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// LogEntryAnonAttestation log entry anon attestation -// -// swagger:model LogEntryAnonAttestation -type LogEntryAnonAttestation struct { - - // data - // Format: byte - Data strfmt.Base64 `json:"data,omitempty"` -} - -// Validate validates this log entry anon attestation -func (m *LogEntryAnonAttestation) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this log entry anon attestation based on context it is used -func (m *LogEntryAnonAttestation) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *LogEntryAnonAttestation) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LogEntryAnonAttestation) UnmarshalBinary(b []byte) error { - var res LogEntryAnonAttestation - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// LogEntryAnonVerification log entry anon verification -// -// swagger:model LogEntryAnonVerification -type LogEntryAnonVerification struct { - - // inclusion proof - InclusionProof *InclusionProof `json:"inclusionProof,omitempty"` - - // Signature over the logID, logIndex, body and integratedTime. - // Format: byte - SignedEntryTimestamp strfmt.Base64 `json:"signedEntryTimestamp,omitempty"` -} - -// Validate validates this log entry anon verification -func (m *LogEntryAnonVerification) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateInclusionProof(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *LogEntryAnonVerification) validateInclusionProof(formats strfmt.Registry) error { - if swag.IsZero(m.InclusionProof) { // not required - return nil - } - - if m.InclusionProof != nil { - if err := m.InclusionProof.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("verification" + "." 
+ "inclusionProof") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("verification" + "." + "inclusionProof") - } - return err - } - } - - return nil -} - -// ContextValidate validate this log entry anon verification based on the context it is used -func (m *LogEntryAnonVerification) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateInclusionProof(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *LogEntryAnonVerification) contextValidateInclusionProof(ctx context.Context, formats strfmt.Registry) error { - - if m.InclusionProof != nil { - - if swag.IsZero(m.InclusionProof) { // not required - return nil - } - - if err := m.InclusionProof.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("verification" + "." + "inclusionProof") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("verification" + "." + "inclusionProof") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *LogEntryAnonVerification) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LogEntryAnonVerification) UnmarshalBinary(b []byte) error { - var res LogEntryAnonVerification - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go deleted file mode 100644 index cb57b27f51..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go +++ /dev/null @@ -1,221 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// LogInfo log info -// -// swagger:model LogInfo -type LogInfo struct { - - // inactive shards - InactiveShards []*InactiveShardLogInfo `json:"inactiveShards"` - - // The current hash value stored at the root of the merkle tree - // Required: true - // Pattern: ^[0-9a-fA-F]{64}$ - RootHash *string `json:"rootHash"` - - // The current signed tree head - // Required: true - SignedTreeHead *string `json:"signedTreeHead"` - - // The current treeID - // Required: true - // Pattern: ^[0-9]+$ - TreeID *string `json:"treeID"` - - // The current number of nodes in the merkle tree - // Required: true - // Minimum: 1 - TreeSize *int64 `json:"treeSize"` -} - -// Validate validates this log info -func (m *LogInfo) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateInactiveShards(formats); err != nil { - res = append(res, err) - } - - if err := m.validateRootHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignedTreeHead(formats); err != nil { - res = append(res, err) - } - - if err := m.validateTreeID(formats); err != nil { - res = append(res, err) - } - - if err := m.validateTreeSize(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *LogInfo) validateInactiveShards(formats strfmt.Registry) error { - if swag.IsZero(m.InactiveShards) { // not required - return nil - } - - for i := 0; i < len(m.InactiveShards); i++ { - if swag.IsZero(m.InactiveShards[i]) { // not required - continue - } - - if m.InactiveShards[i] != nil { - if err := m.InactiveShards[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("inactiveShards" + "." 
+ strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -func (m *LogInfo) validateRootHash(formats strfmt.Registry) error { - - if err := validate.Required("rootHash", "body", m.RootHash); err != nil { - return err - } - - if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - return nil -} - -func (m *LogInfo) validateSignedTreeHead(formats strfmt.Registry) error { - - if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil { - return err - } - - return nil -} - -func (m *LogInfo) validateTreeID(formats strfmt.Registry) error { - - if err := validate.Required("treeID", "body", m.TreeID); err != nil { - return err - } - - if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil { - return err - } - - return nil -} - -func (m *LogInfo) validateTreeSize(formats strfmt.Registry) error { - - if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { - return err - } - - if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this log info based on the context it is used -func (m *LogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateInactiveShards(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *LogInfo) contextValidateInactiveShards(ctx context.Context, formats strfmt.Registry) error { - - for i := 0; i < len(m.InactiveShards); i++ { - - if m.InactiveShards[i] != nil { - - if swag.IsZero(m.InactiveShards[i]) { // not required - return nil - } - - if err := m.InactiveShards[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *LogInfo) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LogInfo) UnmarshalBinary(b []byte) error { - var res LogInfo - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go deleted file mode 100644 index 5b734a5fff..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go +++ /dev/null @@ -1,195 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/validate" -) - -// ProposedEntry proposed entry -// -// swagger:discriminator ProposedEntry kind -type ProposedEntry interface { - runtime.Validatable - runtime.ContextValidatable - - // kind - // Required: true - Kind() string - SetKind(string) - - // AdditionalProperties in base type should be handled just like regular properties - // At this moment, the base type property is pushed down to the subtype -} - -type proposedEntry struct { - kindField string -} - -// Kind gets the kind of this polymorphic type -func (m *proposedEntry) Kind() string { - return "ProposedEntry" -} - -// SetKind sets the kind of this polymorphic type -func (m *proposedEntry) SetKind(val string) { -} - -// UnmarshalProposedEntrySlice unmarshals polymorphic slices of ProposedEntry -func UnmarshalProposedEntrySlice(reader io.Reader, consumer runtime.Consumer) ([]ProposedEntry, error) { - var elements []json.RawMessage - if err := consumer.Consume(reader, &elements); err != nil { - return nil, err - } - - var result []ProposedEntry - for _, element := range elements { - obj, err := unmarshalProposedEntry(element, consumer) - if err != nil { - return nil, err - } - result = append(result, obj) - } - return result, nil -} - -// UnmarshalProposedEntry unmarshals polymorphic ProposedEntry -func UnmarshalProposedEntry(reader io.Reader, consumer runtime.Consumer) (ProposedEntry, error) { - // we need to read this twice, so first into a buffer - data, err := io.ReadAll(reader) - if err != nil { - return nil, err - } - return unmarshalProposedEntry(data, consumer) -} - -func unmarshalProposedEntry(data []byte, consumer runtime.Consumer) (ProposedEntry, error) { - buf := bytes.NewBuffer(data) - buf2 := bytes.NewBuffer(data) - - // the first time this is read is to fetch the value of the kind property.
- var getType struct { - Kind string `json:"kind"` - } - if err := consumer.Consume(buf, &getType); err != nil { - return nil, err - } - - if err := validate.RequiredString("kind", "body", getType.Kind); err != nil { - return nil, err - } - - // The value of kind is used to determine which type to create and unmarshal the data into - switch getType.Kind { - case "ProposedEntry": - var result proposedEntry - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "alpine": - var result Alpine - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "cose": - var result Cose - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "dsse": - var result DSSE - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "hashedrekord": - var result Hashedrekord - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "helm": - var result Helm - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "intoto": - var result Intoto - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "jar": - var result Jar - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "rekord": - var result Rekord - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "rfc3161": - var result Rfc3161 - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "rpm": - var result Rpm - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "tuf": - var result TUF - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - } - return nil, errors.New(422, "invalid kind value: %q", getType.Kind) -} - -// Validate validates this proposed entry -func (m *proposedEntry) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this proposed entry based on context it is used -func (m *proposedEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go deleted file mode 100644 index 81c8ff0545..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Rekord Rekord object -// -// swagger:model rekord -type Rekord struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RekordSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Rekord) Kind() string { - return "rekord" -} - -// SetKind sets the kind of this subtype -func (m *Rekord) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Rekord) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RekordSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmarshalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Rekord - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Rekord) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RekordSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this rekord -func (m *Rekord) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...)
- } - return nil -} - -func (m *Rekord) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Rekord) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this rekord based on the context it is used -func (m *Rekord) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Rekord) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rekord) UnmarshalBinary(b []byte) error { - var res Rekord - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go deleted file mode 100644 index e85442ae97..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// RekordSchema Rekor Schema -// -// # Schema for Rekord objects -// -// swagger:model rekordSchema -type RekordSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go deleted file mode 100644 index 9a525717d1..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go +++ /dev/null @@ -1,611 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// RekordV001Schema Rekor v0.0.1 Schema -// -// # Schema for Rekord object -// -// swagger:model rekordV001Schema -type RekordV001Schema struct { - - // data - // Required: true - Data *RekordV001SchemaData `json:"data"` - - // signature - // Required: true - Signature *RekordV001SchemaSignature `json:"signature"` -} - -// Validate validates this rekord v001 schema -func (m *RekordV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateData(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001Schema) validateData(formats strfmt.Registry) error { - - if err := validate.Required("data", "body", m.Data); err != nil { - return err - } - - if m.Data != nil { - if err := m.Data.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *RekordV001Schema) validateSignature(formats strfmt.Registry) error { - - if err := validate.Required("signature", "body", m.Signature); err != nil { - return err - } - - if m.Signature != nil { - if err := m.Signature.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rekord v001 schema based on the context it is used -func (m *RekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateData(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSignature(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *RekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { - - if m.Data != nil { - - if err := m.Data.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *RekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { - - if m.Signature != nil { - - if err := m.Signature.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001Schema) UnmarshalBinary(b []byte) error { - var res RekordV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RekordV001SchemaData Information about the content associated with the entry -// -// swagger:model RekordV001SchemaData -type RekordV001SchemaData struct { - - // Specifies the content inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // hash - Hash *RekordV001SchemaDataHash `json:"hash,omitempty"` -} - -// Validate validates this rekord v001 schema data -func (m *RekordV001SchemaData) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaData) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rekord v001 schema data based on the context it is used -func (m *RekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." 
+ "hash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001SchemaData) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001SchemaData) UnmarshalBinary(b []byte) error { - var res RekordV001SchemaData - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RekordV001SchemaDataHash Specifies the hash algorithm and value for the content -// -// swagger:model RekordV001SchemaDataHash -type RekordV001SchemaDataHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the content - // Required: true - Value *string `json:"value"` -} - -// Validate validates this rekord v001 schema data hash -func (m *RekordV001SchemaDataHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var rekordV001SchemaDataHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - rekordV001SchemaDataHashTypeAlgorithmPropEnum = append(rekordV001SchemaDataHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // RekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256" - RekordV001SchemaDataHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *RekordV001SchemaDataHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, rekordV001SchemaDataHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *RekordV001SchemaDataHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("data"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *RekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this rekord v001 schema data hash based on the context it is used -func (m *RekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001SchemaDataHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001SchemaDataHash) UnmarshalBinary(b []byte) error { - var res RekordV001SchemaDataHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RekordV001SchemaSignature Information about the detached signature associated with the entry -// -// swagger:model RekordV001SchemaSignature -type RekordV001SchemaSignature struct { - - // Specifies the content of the signature inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` - - // Specifies the format of the signature - // Required: true - // Enum: [pgp minisign x509 ssh] - Format *string `json:"format"` - - // public key - // Required: true - PublicKey *RekordV001SchemaSignaturePublicKey `json:"publicKey"` -} - -// Validate validates this rekord v001 schema signature -func (m *RekordV001SchemaSignature) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if err := m.validateFormat(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaSignature) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -var rekordV001SchemaSignatureTypeFormatPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["pgp","minisign","x509","ssh"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - rekordV001SchemaSignatureTypeFormatPropEnum = append(rekordV001SchemaSignatureTypeFormatPropEnum, v) - } -} - -const ( - - // RekordV001SchemaSignatureFormatPgp captures enum value "pgp" - RekordV001SchemaSignatureFormatPgp string = "pgp" - - // RekordV001SchemaSignatureFormatMinisign captures enum value "minisign" - RekordV001SchemaSignatureFormatMinisign string = "minisign" - - // RekordV001SchemaSignatureFormatX509 captures enum value "x509" - RekordV001SchemaSignatureFormatX509 string = "x509" - - // RekordV001SchemaSignatureFormatSSH captures enum value "ssh" - RekordV001SchemaSignatureFormatSSH string = "ssh" -) - -// prop value enum -func (m *RekordV001SchemaSignature) validateFormatEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, rekordV001SchemaSignatureTypeFormatPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *RekordV001SchemaSignature) validateFormat(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"format", "body", m.Format); err != nil { - return err - } - - // value enum - if err := m.validateFormatEnum("signature"+"."+"format", "body", *m.Format); err != nil { - return err - } - - return nil -} - -func (m *RekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - 
return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." + "publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rekord v001 schema signature based on the context it is used -func (m *RekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." + "publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001SchemaSignature) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001SchemaSignature) UnmarshalBinary(b []byte) error { - var res RekordV001SchemaSignature - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RekordV001SchemaSignaturePublicKey The public key that can verify the signature -// -// swagger:model RekordV001SchemaSignaturePublicKey -type RekordV001SchemaSignaturePublicKey struct { - - // Specifies the content of the public key inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this rekord v001 schema signature public key -func (m *RekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this rekord v001 schema signature public key based on context it is used -func (m *RekordV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { - var res RekordV001SchemaSignaturePublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go deleted file mode 100644 index ef8d42e7a2..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. 
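
(Aside on the rekord models that end above, not part of the patch: every required field is declared as a pointer, e.g. *strfmt.Base64 or *string, while optional fields are plain values; nil then unambiguously means "absent in the JSON body", which is exactly what validate.Required tests. A self-contained sketch of that convention; the struct and helper here are mine, not the generated code's.)

    package main

    import (
    	"encoding/json"
    	"errors"
    	"fmt"
    )

    // signature mirrors the shape of RekordV001SchemaSignature: required
    // fields are pointers so a missing JSON key decodes to nil and is
    // distinguishable from a present-but-zero value.
    type signature struct {
    	Content *string `json:"content"` // required
    	Format  *string `json:"format"`  // required
    }

    func (s *signature) validate() error {
    	if s.Content == nil {
    		return errors.New("signature.content in body is required")
    	}
    	if s.Format == nil {
    		return errors.New("signature.format in body is required")
    	}
    	return nil
    }

    func main() {
    	var s signature
    	_ = json.Unmarshal([]byte(`{"format":"pgp"}`), &s)
    	fmt.Println(s.validate()) // signature.content in body is required
    }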
- -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Rfc3161 RFC3161 Timestamp -// -// swagger:model rfc3161 -type Rfc3161 struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec Rfc3161Schema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Rfc3161) Kind() string { - return "rfc3161" -} - -// SetKind sets the kind of this subtype -func (m *Rfc3161) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Rfc3161) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec Rfc3161Schema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Rfc3161 - - if base.Kind != result.Kind() { - /* Not the type we're looking for. 
*/ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Rfc3161) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec Rfc3161Schema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this rfc3161 -func (m *Rfc3161) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Rfc3161) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Rfc3161) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this rfc3161 based on the context it is used -func (m *Rfc3161) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Rfc3161) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rfc3161) UnmarshalBinary(b []byte) error { - var res Rfc3161 - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go deleted file mode 100644 index 826013a28d..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
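
(Aside on the Rfc3161 model just above, not part of the patch: go-swagger's polymorphic "kind" envelope decodes the payload twice, once into the subtype's fields and once into a struct holding only the kind discriminator, and rejects the entry if the discriminator does not name the expected subtype. A minimal stdlib sketch of that double decode; the entry type and helper are hypothetical.)

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type timestampEntry struct {
    	APIVersion string          `json:"apiVersion"`
    	Spec       json.RawMessage `json:"spec"`
    }

    // decodeEntry mirrors Rfc3161.UnmarshalJSON: decode the body, then decode
    // just the "kind" discriminator and verify it matches the expected subtype.
    func decodeEntry(raw []byte, wantKind string) (*timestampEntry, error) {
    	var data timestampEntry
    	if err := json.Unmarshal(raw, &data); err != nil {
    		return nil, err
    	}
    	var base struct {
    		Kind string `json:"kind"`
    	}
    	if err := json.Unmarshal(raw, &base); err != nil {
    		return nil, err
    	}
    	if base.Kind != wantKind {
    		return nil, fmt.Errorf("invalid kind value: %q", base.Kind)
    	}
    	return &data, nil
    }

    func main() {
    	raw := []byte(`{"kind":"rfc3161","apiVersion":"0.0.1","spec":{}}`)
    	e, err := decodeEntry(raw, "rfc3161")
    	fmt.Println(e.APIVersion, err) // 0.0.1 <nil>
    }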
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Rfc3161Schema Timestamp Schema -// -// # Schema for RFC 3161 timestamp objects -// -// swagger:model rfc3161Schema -type Rfc3161Schema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go deleted file mode 100644 index c3a50c8492..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go +++ /dev/null @@ -1,183 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Rfc3161V001Schema Timestamp v0.0.1 Schema -// -// # Schema for RFC3161 entries -// -// swagger:model rfc3161V001Schema -type Rfc3161V001Schema struct { - - // tsr - // Required: true - Tsr *Rfc3161V001SchemaTsr `json:"tsr"` -} - -// Validate validates this rfc3161 v001 schema -func (m *Rfc3161V001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateTsr(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Rfc3161V001Schema) validateTsr(formats strfmt.Registry) error { - - if err := validate.Required("tsr", "body", m.Tsr); err != nil { - return err - } - - if m.Tsr != nil { - if err := m.Tsr.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("tsr") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("tsr") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rfc3161 v001 schema based on the context it is used -func (m *Rfc3161V001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateTsr(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Rfc3161V001Schema) contextValidateTsr(ctx context.Context, formats strfmt.Registry) error { - - if m.Tsr != nil { - - if err := m.Tsr.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("tsr") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("tsr") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *Rfc3161V001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rfc3161V001Schema) UnmarshalBinary(b []byte) error { - var res Rfc3161V001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// Rfc3161V001SchemaTsr Information about the tsr file associated with the entry -// -// swagger:model Rfc3161V001SchemaTsr -type Rfc3161V001SchemaTsr struct { - - // Specifies the tsr file content inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this rfc3161 v001 schema tsr -func (m *Rfc3161V001SchemaTsr) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Rfc3161V001SchemaTsr) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("tsr"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this rfc3161 v001 schema tsr based on context it is used -func (m *Rfc3161V001SchemaTsr) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *Rfc3161V001SchemaTsr) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rfc3161V001SchemaTsr) UnmarshalBinary(b []byte) error { - var res Rfc3161V001SchemaTsr - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go deleted file mode 100644 index 8b1f10c77e..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Rpm RPM package -// -// swagger:model rpm -type Rpm struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RpmSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Rpm) Kind() string { - return "rpm" -} - -// SetKind sets the kind of this subtype -func (m *Rpm) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Rpm) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RpmSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Rpm - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Rpm) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RpmSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this rpm -func (m *Rpm) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Rpm) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Rpm) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this rpm based on the context it is used -func (m *Rpm) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Rpm) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rpm) UnmarshalBinary(b []byte) error { - var res Rpm - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go deleted file mode 100644 index 5cb378366f..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// RpmSchema RPM Schema -// -// # Schema for RPM objects -// -// swagger:model rpmSchema -type RpmSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go deleted file mode 100644 index 80dadde7fb..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go +++ /dev/null @@ -1,450 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
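
(Aside, not part of the patch: the apiVersion field of these entry types is validated against the full semver grammar via validate.Pattern. The same expression compiles unchanged with Go's standard regexp package, since it uses only non-capturing groups and no lookaround; a quick stdlib check, with an illustrative variable name.)

    package main

    import (
    	"fmt"
    	"regexp"
    )

    // semverRe is the apiVersion pattern from the generated models above.
    var semverRe = regexp.MustCompile(`^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`)

    func main() {
    	fmt.Println(semverRe.MatchString("0.0.1"))      // true
    	fmt.Println(semverRe.MatchString("1.2.3-rc.1")) // true: prerelease tag
    	fmt.Println(semverRe.MatchString("01.2.3"))     // false: no leading zeros
    }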
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// RpmV001Schema RPM v0.0.1 Schema -// -// # Schema for RPM entries -// -// swagger:model rpmV001Schema -type RpmV001Schema struct { - - // package - // Required: true - Package *RpmV001SchemaPackage `json:"package"` - - // public key - // Required: true - PublicKey *RpmV001SchemaPublicKey `json:"publicKey"` -} - -// Validate validates this rpm v001 schema -func (m *RpmV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePackage(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RpmV001Schema) validatePackage(formats strfmt.Registry) error { - - if err := validate.Required("package", "body", m.Package); err != nil { - return err - } - - if m.Package != nil { - if err := m.Package.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package") - } - return err - } - } - - return nil -} - -func (m *RpmV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rpm v001 schema based on the context it is used -func (m *RpmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePackage(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *RpmV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error { - - if m.Package != nil { - - if err := m.Package.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package") - } - return err - } - } - - return nil -} - -func (m *RpmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *RpmV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RpmV001Schema) UnmarshalBinary(b []byte) error { - var res RpmV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RpmV001SchemaPackage Information about the package associated with the entry -// -// swagger:model RpmV001SchemaPackage -type RpmV001SchemaPackage struct { - - // Specifies the package inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // hash - Hash *RpmV001SchemaPackageHash `json:"hash,omitempty"` - - // Values of the RPM headers - // Read Only: true - Headers map[string]string `json:"headers,omitempty"` -} - -// Validate validates this rpm v001 schema package -func (m *RpmV001SchemaPackage) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RpmV001SchemaPackage) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rpm v001 schema package based on the context it is used -func (m *RpmV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateHeaders(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RpmV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package" + "." 
+ "hash") - } - return err - } - } - - return nil -} - -func (m *RpmV001SchemaPackage) contextValidateHeaders(ctx context.Context, formats strfmt.Registry) error { - - return nil -} - -// MarshalBinary interface implementation -func (m *RpmV001SchemaPackage) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RpmV001SchemaPackage) UnmarshalBinary(b []byte) error { - var res RpmV001SchemaPackage - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RpmV001SchemaPackageHash Specifies the hash algorithm and value for the package -// -// swagger:model RpmV001SchemaPackageHash -type RpmV001SchemaPackageHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the package - // Required: true - Value *string `json:"value"` -} - -// Validate validates this rpm v001 schema package hash -func (m *RpmV001SchemaPackageHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var rpmV001SchemaPackageHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - rpmV001SchemaPackageHashTypeAlgorithmPropEnum = append(rpmV001SchemaPackageHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // RpmV001SchemaPackageHashAlgorithmSha256 captures enum value "sha256" - RpmV001SchemaPackageHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *RpmV001SchemaPackageHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, rpmV001SchemaPackageHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *RpmV001SchemaPackageHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("package"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("package"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *RpmV001SchemaPackageHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("package"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this rpm v001 schema package hash based on context it is used -func (m *RpmV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *RpmV001SchemaPackageHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RpmV001SchemaPackageHash) UnmarshalBinary(b []byte) error { - var res RpmV001SchemaPackageHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RpmV001SchemaPublicKey The PGP public key that can verify the RPM signature -// -// swagger:model RpmV001SchemaPublicKey -type 
RpmV001SchemaPublicKey struct { - - // Specifies the content of the public key inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this rpm v001 schema public key -func (m *RpmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RpmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this rpm v001 schema public key based on context it is used -func (m *RpmV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *RpmV001SchemaPublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RpmV001SchemaPublicKey) UnmarshalBinary(b []byte) error { - var res RpmV001SchemaPublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go deleted file mode 100644 index bb1ccccc29..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go +++ /dev/null @@ -1,341 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
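
(Aside on RpmV001SchemaPackageHash above, and its rekord counterpart earlier, not part of the patch: both carry an algorithm/value pair in which "sha256" is the only accepted algorithm. A sketch of producing a compatible hash object for inline content; the struct shape mirrors the generated model, but the helper is mine.)

    package main

    import (
    	"crypto/sha256"
    	"encoding/hex"
    	"encoding/json"
    	"fmt"
    )

    type packageHash struct {
    	Algorithm string `json:"algorithm"`
    	Value     string `json:"value"`
    }

    // hashContent computes the digest in the form the generated models expect:
    // lower-case hex, paired with the fixed "sha256" algorithm enum value.
    func hashContent(content []byte) packageHash {
    	sum := sha256.Sum256(content)
    	return packageHash{Algorithm: "sha256", Value: hex.EncodeToString(sum[:])}
    }

    func main() {
    	h := hashContent([]byte("hello.rpm contents"))
    	out, _ := json.Marshal(h)
    	fmt.Println(string(out)) // {"algorithm":"sha256","value":"..."}
    }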
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// SearchIndex search index -// -// swagger:model SearchIndex -type SearchIndex struct { - - // email - // Format: email - Email strfmt.Email `json:"email,omitempty"` - - // hash - // Pattern: ^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$ - Hash string `json:"hash,omitempty"` - - // operator - // Enum: [and or] - Operator string `json:"operator,omitempty"` - - // public key - PublicKey *SearchIndexPublicKey `json:"publicKey,omitempty"` -} - -// Validate validates this search index -func (m *SearchIndex) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEmail(formats); err != nil { - res = append(res, err) - } - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateOperator(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *SearchIndex) validateEmail(formats strfmt.Registry) error { - if swag.IsZero(m.Email) { // not required - return nil - } - - if err := validate.FormatOf("email", "body", "email", m.Email.String(), formats); err != nil { - return err - } - - return nil -} - -func (m *SearchIndex) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := validate.Pattern("hash", "body", m.Hash, `^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$`); err != nil { - return err - } - - return nil -} - -var searchIndexTypeOperatorPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["and","or"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - searchIndexTypeOperatorPropEnum = append(searchIndexTypeOperatorPropEnum, v) - } -} - -const ( - - // SearchIndexOperatorAnd captures enum value "and" - SearchIndexOperatorAnd string = "and" - - // SearchIndexOperatorOr captures enum value "or" - SearchIndexOperatorOr string = "or" -) - -// prop value enum -func (m *SearchIndex) validateOperatorEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, searchIndexTypeOperatorPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *SearchIndex) validateOperator(formats strfmt.Registry) error { - if swag.IsZero(m.Operator) { // not required - return nil - } - - // value enum - if err := m.validateOperatorEnum("operator", "body", m.Operator); err != nil { - return err - } - - return nil -} - -func (m *SearchIndex) validatePublicKey(formats strfmt.Registry) error { - if swag.IsZero(m.PublicKey) { // not required - return nil - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this search index based on the context it is used -func (m *SearchIndex) ContextValidate(ctx context.Context, formats strfmt.Registry) error { 
- var res []error - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *SearchIndex) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if swag.IsZero(m.PublicKey) { // not required - return nil - } - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *SearchIndex) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *SearchIndex) UnmarshalBinary(b []byte) error { - var res SearchIndex - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// SearchIndexPublicKey search index public key -// -// swagger:model SearchIndexPublicKey -type SearchIndexPublicKey struct { - - // content - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // format - // Required: true - // Enum: [pgp x509 minisign ssh tuf] - Format *string `json:"format"` - - // url - // Format: uri - URL strfmt.URI `json:"url,omitempty"` -} - -// Validate validates this search index public key -func (m *SearchIndexPublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateFormat(formats); err != nil { - res = append(res, err) - } - - if err := m.validateURL(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var searchIndexPublicKeyTypeFormatPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["pgp","x509","minisign","ssh","tuf"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - searchIndexPublicKeyTypeFormatPropEnum = append(searchIndexPublicKeyTypeFormatPropEnum, v) - } -} - -const ( - - // SearchIndexPublicKeyFormatPgp captures enum value "pgp" - SearchIndexPublicKeyFormatPgp string = "pgp" - - // SearchIndexPublicKeyFormatX509 captures enum value "x509" - SearchIndexPublicKeyFormatX509 string = "x509" - - // SearchIndexPublicKeyFormatMinisign captures enum value "minisign" - SearchIndexPublicKeyFormatMinisign string = "minisign" - - // SearchIndexPublicKeyFormatSSH captures enum value "ssh" - SearchIndexPublicKeyFormatSSH string = "ssh" - - // SearchIndexPublicKeyFormatTUF captures enum value "tuf" - SearchIndexPublicKeyFormatTUF string = "tuf" -) - -// prop value enum -func (m *SearchIndexPublicKey) validateFormatEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, searchIndexPublicKeyTypeFormatPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *SearchIndexPublicKey) validateFormat(formats strfmt.Registry) error { - - if err := validate.Required("publicKey"+"."+"format", "body", m.Format); err != nil { - return err - } - - // value enum - if err := m.validateFormatEnum("publicKey"+"."+"format", "body", *m.Format); err != nil { - return err - } - - return nil -} - -func (m *SearchIndexPublicKey) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } - - if err := validate.FormatOf("publicKey"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this search index public key based on context it is used -func (m *SearchIndexPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *SearchIndexPublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *SearchIndexPublicKey) UnmarshalBinary(b []byte) error { - var res SearchIndexPublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go deleted file mode 100644 index 425ec8b348..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
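
(Aside on SearchIndex.hash above, not part of the patch: it accepts sha1, sha256 or sha512 digests, each with an optional algorithm prefix, through a single anchored alternation. A stdlib check of that same pattern; the variable name is illustrative.)

    package main

    import (
    	"fmt"
    	"regexp"
    	"strings"
    )

    // searchHashRe is the hash pattern from the SearchIndex model above: an
    // optional "sha512:"/"sha256:"/"sha1:" prefix followed by 128, 64 or 40
    // hex digits respectively.
    var searchHashRe = regexp.MustCompile(`^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$`)

    func main() {
    	fmt.Println(searchHashRe.MatchString("sha256:" + strings.Repeat("ab", 32))) // true: 64 hex digits
    	fmt.Println(searchHashRe.MatchString("md5:d41d8cd9"))                       // false: unsupported algorithm
    }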
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - "io" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// SearchLogQuery search log query -// -// swagger:model SearchLogQuery -type SearchLogQuery struct { - entriesField []ProposedEntry - - // entry u UI ds - // Max Items: 10 - // Min Items: 1 - EntryUUIDs []string `json:"entryUUIDs"` - - // log indexes - // Max Items: 10 - // Min Items: 1 - LogIndexes []*int64 `json:"logIndexes"` -} - -// Entries gets the entries of this base type -func (m *SearchLogQuery) Entries() []ProposedEntry { - return m.entriesField -} - -// SetEntries sets the entries of this base type -func (m *SearchLogQuery) SetEntries(val []ProposedEntry) { - m.entriesField = val -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *SearchLogQuery) UnmarshalJSON(raw []byte) error { - var data struct { - Entries json.RawMessage `json:"entries"` - - EntryUUIDs []string `json:"entryUUIDs"` - - LogIndexes []*int64 `json:"logIndexes"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var propEntries []ProposedEntry - if string(data.Entries) != "null" { - entries, err := UnmarshalProposedEntrySlice(bytes.NewBuffer(data.Entries), runtime.JSONConsumer()) - if err != nil && err != io.EOF { - return err - } - propEntries = entries - } - - var result SearchLogQuery - - // entries - result.entriesField = propEntries - - // entryUUIDs - result.EntryUUIDs = data.EntryUUIDs - - // logIndexes - result.LogIndexes = data.LogIndexes - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m SearchLogQuery) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - EntryUUIDs []string `json:"entryUUIDs"` - - LogIndexes []*int64 `json:"logIndexes"` - }{ - - EntryUUIDs: m.EntryUUIDs, - - LogIndexes: m.LogIndexes, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Entries []ProposedEntry `json:"entries"` - }{ - - Entries: m.entriesField, - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this search log query -func (m *SearchLogQuery) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEntries(formats); err != nil { - res = append(res, err) - } - - if err := m.validateEntryUUIDs(formats); err != nil { - res = append(res, err) - } - - if err := m.validateLogIndexes(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
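
(Aside, not part of the patch: the validators just below cap entries, entryUUIDs and logIndexes at between 1 and 10 items, and skip the check entirely when the slice is absent, mirroring the swag.IsZero guard. A stdlib sketch of that bounds check; the helper name is mine.)

    package main

    import "fmt"

    // checkItemBounds mirrors the MinItems/MaxItems validation applied to the
    // entries, entryUUIDs and logIndexes fields of SearchLogQuery.
    func checkItemBounds(field string, n int) error {
    	if n == 0 {
    		return nil // empty slices are skipped, matching the swag.IsZero guard
    	}
    	if n > 10 {
    		return fmt.Errorf("%s in body should have at most 10 items", field)
    	}
    	return nil
    }

    func main() {
    	fmt.Println(checkItemBounds("logIndexes", 3))  // <nil>
    	fmt.Println(checkItemBounds("logIndexes", 11)) // error: at most 10 items
    }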
- } - return nil -} - -func (m *SearchLogQuery) validateEntries(formats strfmt.Registry) error { - if swag.IsZero(m.Entries()) { // not required - return nil - } - - iEntriesSize := int64(len(m.Entries())) - - if err := validate.MinItems("entries", "body", iEntriesSize, 1); err != nil { - return err - } - - if err := validate.MaxItems("entries", "body", iEntriesSize, 10); err != nil { - return err - } - - for i := 0; i < len(m.Entries()); i++ { - - if err := m.entriesField[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("entries" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("entries" + "." + strconv.Itoa(i)) - } - return err - } - - } - - return nil -} - -func (m *SearchLogQuery) validateEntryUUIDs(formats strfmt.Registry) error { - if swag.IsZero(m.EntryUUIDs) { // not required - return nil - } - - iEntryUUIDsSize := int64(len(m.EntryUUIDs)) - - if err := validate.MinItems("entryUUIDs", "body", iEntryUUIDsSize, 1); err != nil { - return err - } - - if err := validate.MaxItems("entryUUIDs", "body", iEntryUUIDsSize, 10); err != nil { - return err - } - - for i := 0; i < len(m.EntryUUIDs); i++ { - - if err := validate.Pattern("entryUUIDs"+"."+strconv.Itoa(i), "body", m.EntryUUIDs[i], `^([0-9a-fA-F]{64}|[0-9a-fA-F]{80})$`); err != nil { - return err - } - - } - - return nil -} - -func (m *SearchLogQuery) validateLogIndexes(formats strfmt.Registry) error { - if swag.IsZero(m.LogIndexes) { // not required - return nil - } - - iLogIndexesSize := int64(len(m.LogIndexes)) - - if err := validate.MinItems("logIndexes", "body", iLogIndexesSize, 1); err != nil { - return err - } - - if err := validate.MaxItems("logIndexes", "body", iLogIndexesSize, 10); err != nil { - return err - } - - for i := 0; i < len(m.LogIndexes); i++ { - if swag.IsZero(m.LogIndexes[i]) { // not required - continue - } - - if err := validate.MinimumInt("logIndexes"+"."+strconv.Itoa(i), "body", *m.LogIndexes[i], 0, false); err != nil { - return err - } - - } - - return nil -} - -// ContextValidate validate this search log query based on the context it is used -func (m *SearchLogQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateEntries(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *SearchLogQuery) contextValidateEntries(ctx context.Context, formats strfmt.Registry) error { - - for i := 0; i < len(m.Entries()); i++ { - - if swag.IsZero(m.entriesField[i]) { // not required - return nil - } - - if err := m.entriesField[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("entries" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("entries" + "." 
+ strconv.Itoa(i)) - } - return err - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *SearchLogQuery) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *SearchLogQuery) UnmarshalBinary(b []byte) error { - var res SearchLogQuery - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go deleted file mode 100644 index a5f6eff0f7..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// TUF TUF metadata -// -// swagger:model tuf -type TUF struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec TUFSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *TUF) Kind() string { - return "tuf" -} - -// SetKind sets the kind of this subtype -func (m *TUF) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *TUF) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec TUFSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result TUF - - if base.Kind != result.Kind() { - /* Not the type we're looking for. 
*/ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m TUF) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec TUFSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this tuf -func (m *TUF) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *TUF) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *TUF) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this tuf based on the context it is used -func (m *TUF) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *TUF) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *TUF) UnmarshalBinary(b []byte) error { - var res TUF - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go deleted file mode 100644 index 37dca8b68e..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// TUFSchema TUF Schema -// -// # Schema for TUF metadata objects -// -// swagger:model tufSchema -type TUFSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go deleted file mode 100644 index 021e0ce7d3..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go +++ /dev/null @@ -1,304 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// TUFV001Schema TUF v0.0.1 Schema -// -// # Schema for TUF metadata entries -// -// swagger:model tufV001Schema -type TUFV001Schema struct { - - // metadata - // Required: true - Metadata *TUFV001SchemaMetadata `json:"metadata"` - - // root - // Required: true - Root *TUFV001SchemaRoot `json:"root"` - - // TUF specification version - // Read Only: true - SpecVersion string `json:"spec_version,omitempty"` -} - -// Validate validates this tuf v001 schema -func (m *TUFV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateMetadata(formats); err != nil { - res = append(res, err) - } - - if err := m.validateRoot(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *TUFV001Schema) validateMetadata(formats strfmt.Registry) error { - - if err := validate.Required("metadata", "body", m.Metadata); err != nil { - return err - } - - if m.Metadata != nil { - if err := m.Metadata.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("metadata") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("metadata") - } - return err - } - } - - return nil -} - -func (m *TUFV001Schema) validateRoot(formats strfmt.Registry) error { - - if err := validate.Required("root", "body", m.Root); err != nil { - return err - } - - if m.Root != nil { - if err := m.Root.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("root") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("root") - } - return err - } - } - - return nil -} - -// ContextValidate validate this tuf v001 schema based on the context it is used -func (m *TUFV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateMetadata(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateRoot(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSpecVersion(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *TUFV001Schema) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error { - - if m.Metadata != nil { - - if err := m.Metadata.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("metadata") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("metadata") - } - return err - } - } - - return nil -} - -func (m *TUFV001Schema) contextValidateRoot(ctx context.Context, formats strfmt.Registry) error { - - if m.Root != nil { - - if err := m.Root.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("root") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("root") - } - return err - } - } - - return nil -} - -func (m *TUFV001Schema) contextValidateSpecVersion(ctx context.Context, formats strfmt.Registry) error { - - if err := validate.ReadOnly(ctx, "spec_version", "body", string(m.SpecVersion)); err != nil { - return err - } - - return nil -} - -// MarshalBinary interface implementation -func (m *TUFV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *TUFV001Schema) UnmarshalBinary(b []byte) error { - var res TUFV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// TUFV001SchemaMetadata TUF metadata -// -// swagger:model TUFV001SchemaMetadata -type TUFV001SchemaMetadata struct { - - // Specifies the metadata inline within the document - // Required: true - Content interface{} `json:"content"` -} - -// Validate validates this TUF v001 schema metadata -func (m *TUFV001SchemaMetadata) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *TUFV001SchemaMetadata) validateContent(formats strfmt.Registry) error { - - if m.Content == nil { - return errors.Required("metadata"+"."+"content", "body", nil) - } - - return nil -} - -// ContextValidate validates this TUF v001 schema metadata based on context it is used -func (m *TUFV001SchemaMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *TUFV001SchemaMetadata) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *TUFV001SchemaMetadata) UnmarshalBinary(b []byte) error { - var res TUFV001SchemaMetadata - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// TUFV001SchemaRoot root metadata containing information about the public keys used to sign the manifest -// -// swagger:model TUFV001SchemaRoot -type TUFV001SchemaRoot struct { - - // Specifies the metadata inline within the document - // Required: true - Content interface{} `json:"content"` -} - -// Validate validates this TUF v001 schema root -func (m *TUFV001SchemaRoot) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *TUFV001SchemaRoot) validateContent(formats strfmt.Registry) error { - - if m.Content == nil { - return errors.Required("root"+"."+"content", "body", nil) - } - - return nil -} - -// ContextValidate validates this TUF v001 schema root based on context it is used -func (m *TUFV001SchemaRoot) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *TUFV001SchemaRoot) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *TUFV001SchemaRoot) UnmarshalBinary(b []byte) error { - var res TUFV001SchemaRoot - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt b/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt deleted file mode 100644 index 7a01c84986..0000000000 --- a/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt +++ /dev/null @@ -1,14 +0,0 @@ - -Copyright 2021 The Sigstore Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/sigstore/sigstore/LICENSE b/vendor/github.com/sigstore/sigstore/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/sigstore/sigstore/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
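For reference, a minimal sketch (illustrative only, not part of this change) of how the generated rekor TUF model removed above is typically exercised through its go-swagger Validate hook; the import paths are the canonical upstream github.com ones, not the mirrored github.com paths shown in this vendor tree:

    package main

    import (
    	"fmt"

    	"github.com/go-openapi/strfmt"
    	"github.com/sigstore/rekor/pkg/generated/models"
    )

    func main() {
    	apiVersion := "0.0.1" // must satisfy the semver pattern enforced by validateAPIVersion
    	entry := models.TUF{
    		APIVersion: &apiVersion,
    		// TUFSchema is an open interface{}; Validate only checks that spec is non-nil.
    		// The keys here are illustrative placeholders.
    		Spec: map[string]interface{}{"metadata": map[string]interface{}{}, "root": map[string]interface{}{}},
    	}
    	// Validate enforces the required apiVersion (and its pattern) and a non-nil spec.
    	if err := entry.Validate(strfmt.Default); err != nil {
    		fmt.Println("invalid tuf entry:", err)
    	}
    }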
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go deleted file mode 100644 index 348f47bdc8..0000000000 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go +++ /dev/null @@ -1,173 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cryptoutils implements support for working with encoded certificates, public keys, and private keys -package cryptoutils - -import ( - "bytes" - "crypto/rand" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "time" -) - -const ( - // CertificatePEMType is the string "CERTIFICATE" to be used during PEM encoding and decoding - CertificatePEMType PEMType = "CERTIFICATE" -) - -// MarshalCertificateToPEM converts the provided X509 certificate into PEM format -func MarshalCertificateToPEM(cert *x509.Certificate) ([]byte, error) { - if cert == nil { - return nil, errors.New("nil certificate provided") - } - return PEMEncode(CertificatePEMType, cert.Raw), nil -} - -// MarshalCertificatesToPEM converts the provided X509 certificates into PEM format -func MarshalCertificatesToPEM(certs []*x509.Certificate) ([]byte, error) { - buf := bytes.Buffer{} - for _, cert := range certs { - pemBytes, err := MarshalCertificateToPEM(cert) - if err != nil { - return nil, err - } - _, _ = buf.Write(pemBytes) - } - return buf.Bytes(), nil -} - -// UnmarshalCertificatesFromPEM extracts one or more X509 certificates from the provided -// byte slice, which is assumed to be in PEM-encoded format. -func UnmarshalCertificatesFromPEM(pemBytes []byte) ([]*x509.Certificate, error) { - result := []*x509.Certificate{} - remaining := pemBytes - remaining = bytes.TrimSpace(remaining) - - for len(remaining) > 0 { - var certDer *pem.Block - certDer, remaining = pem.Decode(remaining) - - if certDer == nil { - return nil, errors.New("error during PEM decoding") - } - - cert, err := x509.ParseCertificate(certDer.Bytes) - if err != nil { - return nil, err - } - result = append(result, cert) - } - return result, nil -} - -// UnmarshalCertificatesFromPEMLimited extracts one or more X509 certificates from the provided -// byte slice, which is assumed to be in PEM-encoded format. Fails after a specified -// number of iterations. A reasonable limit is 10 iterations. 
-func UnmarshalCertificatesFromPEMLimited(pemBytes []byte, iterations int) ([]*x509.Certificate, error) { - result := []*x509.Certificate{} - remaining := pemBytes - remaining = bytes.TrimSpace(remaining) - - count := 0 - for len(remaining) > 0 { - if count == iterations { - return nil, errors.New("too many certificates specified in PEM block") - } - var certDer *pem.Block - certDer, remaining = pem.Decode(remaining) - - if certDer == nil { - return nil, errors.New("error during PEM decoding") - } - - cert, err := x509.ParseCertificate(certDer.Bytes) - if err != nil { - return nil, err - } - result = append(result, cert) - count++ - } - return result, nil -} - -// LoadCertificatesFromPEM extracts one or more X509 certificates from the provided -// io.Reader. -func LoadCertificatesFromPEM(pem io.Reader) ([]*x509.Certificate, error) { - fileBytes, err := io.ReadAll(pem) - if err != nil { - return nil, err - } - return UnmarshalCertificatesFromPEM(fileBytes) -} - -func formatTime(t time.Time) string { - return t.UTC().Format(time.RFC3339) -} - -// CheckExpiration verifies that epoch is during the validity period of -// the certificate provided. -// -// It returns nil if issueTime < epoch < expirationTime, and error otherwise. -func CheckExpiration(cert *x509.Certificate, epoch time.Time) error { - if cert == nil { - return errors.New("certificate is nil") - } - if cert.NotAfter.Before(epoch) { - return fmt.Errorf("certificate expiration time %s is before %s", formatTime(cert.NotAfter), formatTime(epoch)) - } - if cert.NotBefore.After(epoch) { - return fmt.Errorf("certificate issued time %s is after %s", formatTime(cert.NotBefore), formatTime(epoch)) - } - return nil -} - -// ParseCSR parses a PKCS#10 PEM-encoded CSR. -func ParseCSR(csr []byte) (*x509.CertificateRequest, error) { - derBlock, _ := pem.Decode(csr) - if derBlock == nil || derBlock.Bytes == nil { - return nil, errors.New("no CSR found while decoding") - } - correctType := false - acceptedHeaders := []string{"CERTIFICATE REQUEST", "NEW CERTIFICATE REQUEST"} - for _, v := range acceptedHeaders { - if derBlock.Type == v { - correctType = true - } - } - if !correctType { - return nil, fmt.Errorf("DER type %v is not of any type %v for CSR", derBlock.Type, acceptedHeaders) - } - - return x509.ParseCertificateRequest(derBlock.Bytes) -} - -// GenerateSerialNumber creates a compliant serial number as per RFC 5280 4.1.2.2. -// Serial numbers must be positive, and can be no longer than 20 bytes. -// The serial number is generated with 159 bits, so that the first bit will always -// be 0, resulting in a positive serial number. -func GenerateSerialNumber() (*big.Int, error) { - // Pick a random number from 0 to 2^159. - serial, err := rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) - if err != nil { - return nil, errors.New("error generating serial number") - } - return serial, nil -} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go deleted file mode 100644 index 4e7e73d551..0000000000 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// -// Copyright 2022 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cryptoutils contains utilities related to handling cryptographic materials. -package cryptoutils diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go deleted file mode 100644 index 3fa6e7ba5b..0000000000 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go +++ /dev/null @@ -1,31 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cryptoutils - -import ( - "encoding/pem" -) - -// PEMType is a specific type for string constants used during PEM encoding and decoding -type PEMType string - -// PEMEncode encodes the specified byte slice in PEM format using the provided type string -func PEMEncode(typeStr PEMType, bytes []byte) []byte { - return pem.EncodeToMemory(&pem.Block{ - Type: string(typeStr), - Bytes: bytes, - }) -} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go deleted file mode 100644 index 89dd05e017..0000000000 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go +++ /dev/null @@ -1,94 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package cryptoutils - -import ( - "errors" - "fmt" - "io" - "os" - - "golang.org/x/term" -) - -// PassFunc is a type of function that takes a boolean (representing whether confirmation is desired) and returns the password as read, along with an error if one occurred -type PassFunc func(bool) ([]byte, error) - -// Read is for fuzzing -var Read = readPasswordFn - -// readPasswordFn reads the password from the following sources, in order of preference: -// -// - COSIGN_PASSWORD environment variable -// -// - user input from terminal (if present) -// -// - provided to stdin from pipe -func readPasswordFn() func() ([]byte, error) { - if pw, ok := os.LookupEnv("COSIGN_PASSWORD"); ok { - return func() ([]byte, error) { - return []byte(pw), nil - } - } - if term.IsTerminal(0) { - return func() ([]byte, error) { - return term.ReadPassword(0) - } - } - // Handle piped in passwords. - return func() ([]byte, error) { - return io.ReadAll(os.Stdin) - } -} - -// StaticPasswordFunc returns a PassFunc which returns the provided password. -func StaticPasswordFunc(pw []byte) PassFunc { - return func(bool) ([]byte, error) { - return pw, nil - } -} - -// SkipPassword is a PassFunc that does not interact with a user, but -// simply returns nil for both the password result and the error value. -func SkipPassword(_ bool) ([]byte, error) { - return nil, nil -} - -// GetPasswordFromStdIn gathers the password from stdin with an -// optional confirmation step. -func GetPasswordFromStdIn(confirm bool) ([]byte, error) { - read := Read() - fmt.Fprint(os.Stderr, "Enter password for private key: ") - pw1, err := read() - fmt.Fprintln(os.Stderr) - if err != nil { - return nil, err - } - if !confirm { - return pw1, nil - } - fmt.Fprint(os.Stderr, "Enter again: ") - pw2, err := read() - fmt.Fprintln(os.Stderr) - if err != nil { - return nil, err - } - - if string(pw1) != string(pw2) { - return nil, errors.New("passwords do not match") - } - return pw1, nil -} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go deleted file mode 100644 index 325813d692..0000000000 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go +++ /dev/null @@ -1,152 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package cryptoutils - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - - "github.com/secure-systems-lab/go-securesystemslib/encrypted" -) - -const ( - // PrivateKeyPEMType is the string "PRIVATE KEY" to be used during PEM encoding and decoding - PrivateKeyPEMType PEMType = "PRIVATE KEY" - // ECPrivateKeyPEMType is the string "EC PRIVATE KEY" used to parse SEC 1 EC private keys - ECPrivateKeyPEMType PEMType = "EC PRIVATE KEY" - // PKCS1PrivateKeyPEMType is the string "RSA PRIVATE KEY" used to parse PKCS#1-encoded private keys - PKCS1PrivateKeyPEMType PEMType = "RSA PRIVATE KEY" - encryptedCosignPrivateKeyPEMType PEMType = "ENCRYPTED COSIGN PRIVATE KEY" - // EncryptedSigstorePrivateKeyPEMType is the string "ENCRYPTED SIGSTORE PRIVATE KEY" to be used during PEM encoding and decoding - EncryptedSigstorePrivateKeyPEMType PEMType = "ENCRYPTED SIGSTORE PRIVATE KEY" -) - -func pemEncodeKeyPair(priv crypto.PrivateKey, pub crypto.PublicKey, pf PassFunc) (privPEM, pubPEM []byte, err error) { - pubPEM, err = MarshalPublicKeyToPEM(pub) - if err != nil { - return nil, nil, err - } - derBytes, err := MarshalPrivateKeyToDER(priv) - if err != nil { - return nil, nil, err - } - - if pf == nil { - return PEMEncode(PrivateKeyPEMType, derBytes), pubPEM, nil - } - password, err := pf(true) - if err != nil { - return nil, nil, err - } - if password == nil { - return PEMEncode(PrivateKeyPEMType, derBytes), pubPEM, nil - } - if derBytes, err = encrypted.Encrypt(derBytes, password); err != nil { - return nil, nil, err - } - return PEMEncode(EncryptedSigstorePrivateKeyPEMType, derBytes), pubPEM, nil -} - -// GeneratePEMEncodedECDSAKeyPair generates an ECDSA keypair, optionally password encrypted using a provided PassFunc, and PEM encoded. -func GeneratePEMEncodedECDSAKeyPair(curve elliptic.Curve, pf PassFunc) (privPEM, pubPEM []byte, err error) { - priv, err := ecdsa.GenerateKey(curve, rand.Reader) - if err != nil { - return nil, nil, err - } - return pemEncodeKeyPair(priv, priv.Public(), pf) -} - -// GeneratePEMEncodedRSAKeyPair generates an RSA keypair, optionally password encrypted using a provided PassFunc, and PEM encoded. 
-func GeneratePEMEncodedRSAKeyPair(keyLengthBits int, pf PassFunc) (privPEM, pubPEM []byte, err error) { - priv, err := rsa.GenerateKey(rand.Reader, keyLengthBits) - if err != nil { - return nil, nil, err - } - return pemEncodeKeyPair(priv, priv.Public(), pf) -} - -// MarshalPrivateKeyToEncryptedDER marshals the private key and encrypts the DER-encoded value using the specified password function -func MarshalPrivateKeyToEncryptedDER(priv crypto.PrivateKey, pf PassFunc) ([]byte, error) { - derKey, err := MarshalPrivateKeyToDER(priv) - if err != nil { - return nil, err - } - password, err := pf(true) - if err != nil { - return nil, err - } - if password == nil { - return nil, errors.New("password was nil") - } - return encrypted.Encrypt(derKey, password) -} - -// UnmarshalPEMToPrivateKey converts a PEM-encoded byte slice into a crypto.PrivateKey -func UnmarshalPEMToPrivateKey(pemBytes []byte, pf PassFunc) (crypto.PrivateKey, error) { - derBlock, _ := pem.Decode(pemBytes) - if derBlock == nil { - return nil, errors.New("PEM decoding failed") - } - switch derBlock.Type { - case string(PrivateKeyPEMType): - return x509.ParsePKCS8PrivateKey(derBlock.Bytes) - case string(PKCS1PrivateKeyPEMType): - return x509.ParsePKCS1PrivateKey(derBlock.Bytes) - case string(ECPrivateKeyPEMType): - return x509.ParseECPrivateKey(derBlock.Bytes) - case string(EncryptedSigstorePrivateKeyPEMType), string(encryptedCosignPrivateKeyPEMType): - derBytes := derBlock.Bytes - if pf != nil { - password, err := pf(false) - if err != nil { - return nil, err - } - if password != nil { - derBytes, err = encrypted.Decrypt(derBytes, password) - if err != nil { - return nil, err - } - } - } - - return x509.ParsePKCS8PrivateKey(derBytes) - } - return nil, fmt.Errorf("unknown private key PEM file type: %v", derBlock.Type) -} - -// MarshalPrivateKeyToDER converts a crypto.PrivateKey into a PKCS8 ASN.1 DER byte slice -func MarshalPrivateKeyToDER(priv crypto.PrivateKey) ([]byte, error) { - if priv == nil { - return nil, errors.New("empty key") - } - return x509.MarshalPKCS8PrivateKey(priv) -} - -// MarshalPrivateKeyToPEM converts a crypto.PrivateKey into a PKCS#8 PEM-encoded byte slice -func MarshalPrivateKeyToPEM(priv crypto.PrivateKey) ([]byte, error) { - derBytes, err := MarshalPrivateKeyToDER(priv) - if err != nil { - return nil, err - } - return PEMEncode(PrivateKeyPEMType, derBytes), nil -} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go deleted file mode 100644 index a8b2805e64..0000000000 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go +++ /dev/null @@ -1,184 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package cryptoutils - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha1" // nolint:gosec - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - - "github.com/letsencrypt/boulder/goodkey" -) - -const ( - // PublicKeyPEMType is the string "PUBLIC KEY" to be used during PEM encoding and decoding - PublicKeyPEMType PEMType = "PUBLIC KEY" - // PKCS1PublicKeyPEMType is the string "RSA PUBLIC KEY" used to parse PKCS#1-encoded public keys - PKCS1PublicKeyPEMType PEMType = "RSA PUBLIC KEY" -) - -// subjectPublicKeyInfo is used to construct a subject key ID. -// https://tools.ietf.org/html/rfc5280#section-4.1.2.7 -type subjectPublicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - SubjectPublicKey asn1.BitString -} - -// UnmarshalPEMToPublicKey converts a PEM-encoded byte slice into a crypto.PublicKey -func UnmarshalPEMToPublicKey(pemBytes []byte) (crypto.PublicKey, error) { - derBytes, _ := pem.Decode(pemBytes) - if derBytes == nil { - return nil, errors.New("PEM decoding failed") - } - switch derBytes.Type { - case string(PublicKeyPEMType): - return x509.ParsePKIXPublicKey(derBytes.Bytes) - case string(PKCS1PublicKeyPEMType): - return x509.ParsePKCS1PublicKey(derBytes.Bytes) - default: - return nil, fmt.Errorf("unknown Public key PEM file type: %v. Are you passing the correct public key?", - derBytes.Type) - } -} - -// MarshalPublicKeyToDER converts a crypto.PublicKey into a PKIX, ASN.1 DER byte slice -func MarshalPublicKeyToDER(pub crypto.PublicKey) ([]byte, error) { - if pub == nil { - return nil, errors.New("empty key") - } - return x509.MarshalPKIXPublicKey(pub) -} - -// MarshalPublicKeyToPEM converts a crypto.PublicKey into a PEM-encoded byte slice -func MarshalPublicKeyToPEM(pub crypto.PublicKey) ([]byte, error) { - derBytes, err := MarshalPublicKeyToDER(pub) - if err != nil { - return nil, err - } - return PEMEncode(PublicKeyPEMType, derBytes), nil -} - -// SKID generates a 160-bit SHA-1 hash of the value of the BIT STRING -// subjectPublicKey (excluding the tag, length, and number of unused bits). -// https://tools.ietf.org/html/rfc5280#section-4.2.1.2 -func SKID(pub crypto.PublicKey) ([]byte, error) { - derPubBytes, err := x509.MarshalPKIXPublicKey(pub) - if err != nil { - return nil, err - } - var spki subjectPublicKeyInfo - if _, err := asn1.Unmarshal(derPubBytes, &spki); err != nil { - return nil, err - } - skid := sha1.Sum(spki.SubjectPublicKey.Bytes) // nolint:gosec - return skid[:], nil -} - -// EqualKeys compares two public keys. Supports RSA, ECDSA and ED25519. 
-// If not equal, the error message contains hex-encoded SHA1 hashes of the DER-encoded keys -func EqualKeys(first, second crypto.PublicKey) error { - switch pub := first.(type) { - case *rsa.PublicKey: - if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "rsa")) - } - case *ecdsa.PublicKey: - if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "ecdsa")) - } - case ed25519.PublicKey: - if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "ed25519")) - } - default: - return errors.New("unsupported key type") - } - return nil -} - -// genErrMsg generates an error message for EqualKeys -func genErrMsg(first, second crypto.PublicKey, keyType string) string { - msg := fmt.Sprintf("%s public keys are not equal", keyType) - // Calculate SKID to include in error message - firstSKID, err := SKID(first) - if err != nil { - return msg - } - secondSKID, err := SKID(second) - if err != nil { - return msg - } - return fmt.Sprintf("%s (%s, %s)", msg, hex.EncodeToString(firstSKID), hex.EncodeToString(secondSKID)) -} - -// ValidatePubKey validates the parameters of an RSA, ECDSA, or ED25519 public key. -func ValidatePubKey(pub crypto.PublicKey) error { - switch pk := pub.(type) { - case *rsa.PublicKey: - // goodkey policy enforces: - // * Size of key: 2048 <= size <= 4096, size % 8 = 0 - // * Exponent E = 65537 (Default exponent for OpenSSL and Golang) - // * Small primes check for modulus - // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17) - // * Key is easily factored with Fermat's factorization method - p, err := goodkey.NewKeyPolicy(&goodkey.Config{FermatRounds: 100}, nil) - if err != nil { - // Should not occur, only chances to return errors are if fermat rounds - // are <0 or when loading blocked/weak keys from disk (not used here) - return errors.New("unable to initialize key policy") - } - // ctx is unused - return p.GoodKey(context.Background(), pub) - case *ecdsa.PublicKey: - // Unable to use goodkey policy because P-521 curve is not supported - return validateEcdsaKey(pk) - case ed25519.PublicKey: - return validateEd25519Key(pk) - } - return errors.New("unsupported public key type") -} - -// Enforce that the ECDSA key curve is one of: -// * NIST P-256 (secp256r1, prime256v1) -// * NIST P-384 -// * NIST P-521. -// Other EC curves, like secp256k1, are not supported by Go. -func validateEcdsaKey(pub *ecdsa.PublicKey) error { - switch pub.Curve { - case elliptic.P224(): - return fmt.Errorf("unsupported ec curve, expected NIST P-256, P-384, or P-521") - case elliptic.P256(), elliptic.P384(), elliptic.P521(): - return nil - default: - return fmt.Errorf("unexpected ec curve") - } -} - -// No validations currently, ED25519 supports only one key size. -func validateEd25519Key(_ ed25519.PublicKey) error { - return nil -} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go deleted file mode 100644 index d237ef58ea..0000000000 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2022 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cryptoutils - -import ( - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "fmt" -) - -var ( - // OIDOtherName is the OID for the OtherName SAN per RFC 5280 - OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7} - // SANOID is the OID for Subject Alternative Name per RFC 5280 - SANOID = asn1.ObjectIdentifier{2, 5, 29, 17} -) - -// OtherName describes a name related to a certificate which is not in one -// of the standard name formats. RFC 5280, 4.2.1.6: -// -// OtherName ::= SEQUENCE { -// type-id OBJECT IDENTIFIER, -// value [0] EXPLICIT ANY DEFINED BY type-id } -// -// OtherName for Fulcio-issued certificates only supports UTF-8 strings as values. -type OtherName struct { - ID asn1.ObjectIdentifier - Value string `asn1:"utf8,explicit,tag:0"` -} - -// MarshalOtherNameSAN creates a Subject Alternative Name extension -// with an OtherName sequence. RFC 5280, 4.2.1.6: -// -// SubjectAltName ::= GeneralNames -// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName -// GeneralName ::= CHOICE { -// -// otherName [0] OtherName, -// ... } -func MarshalOtherNameSAN(name string, critical bool) (*pkix.Extension, error) { - o := OtherName{ - ID: OIDOtherName, - Value: name, - } - bytes, err := asn1.MarshalWithParams(o, "tag:0") - if err != nil { - return nil, err - } - - sans, err := asn1.Marshal([]asn1.RawValue{{FullBytes: bytes}}) - if err != nil { - return nil, err - } - return &pkix.Extension{ - Id: SANOID, - Critical: critical, - Value: sans, - }, nil -} - -// UnmarshalOtherNameSAN extracts a UTF-8 string from the OtherName -// field in the Subject Alternative Name extension. 
-func UnmarshalOtherNameSAN(exts []pkix.Extension) (string, error) { - var otherNames []string - - for _, e := range exts { - if !e.Id.Equal(SANOID) { - continue - } - - var seq asn1.RawValue - rest, err := asn1.Unmarshal(e.Value, &seq) - if err != nil { - return "", err - } else if len(rest) != 0 { - return "", fmt.Errorf("trailing data after X.509 extension") - } - if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal { - return "", asn1.StructuralError{Msg: "bad SAN sequence"} - } - - rest = seq.Bytes - for len(rest) > 0 { - var v asn1.RawValue - rest, err = asn1.Unmarshal(rest, &v) - if err != nil { - return "", err - } - - // skip all GeneralName fields except OtherName - if v.Tag != 0 { - continue - } - - var other OtherName - if _, err := asn1.UnmarshalWithParams(v.FullBytes, &other, "tag:0"); err != nil { - return "", fmt.Errorf("could not parse requested OtherName SAN: %w", err) - } - if !other.ID.Equal(OIDOtherName) { - return "", fmt.Errorf("unexpected OID for OtherName, expected %v, got %v", OIDOtherName, other.ID) - } - otherNames = append(otherNames, other.Value) - } - } - - if len(otherNames) == 0 { - return "", errors.New("no OtherName found") - } - if len(otherNames) != 1 { - return "", errors.New("expected only one OtherName") - } - - return otherNames[0], nil -} - -// GetSubjectAlternateNames extracts all subject alternative names from -// the certificate, including email addresses, DNS, IP addresses, URIs, -// and OtherName SANs -func GetSubjectAlternateNames(cert *x509.Certificate) []string { - sans := []string{} - sans = append(sans, cert.DNSNames...) - sans = append(sans, cert.EmailAddresses...) - for _, ip := range cert.IPAddresses { - sans = append(sans, ip.String()) - } - for _, uri := range cert.URIs { - sans = append(sans, uri.String()) - } - // ignore error if there's no OtherName SAN - otherName, _ := UnmarshalOtherNameSAN(cert.Extensions) - if len(otherName) > 0 { - sans = append(sans, otherName) - } - return sans -} diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore deleted file mode 100644 index 1fb13abebe..0000000000 --- a/vendor/github.com/sirupsen/logrus/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -logrus -vendor - -.idea/ diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml deleted file mode 100644 index 65dc285037..0000000000 --- a/vendor/github.com/sirupsen/logrus/.golangci.yml +++ /dev/null @@ -1,40 +0,0 @@ -run: - # do not run on test files yet - tests: false - -# all available settings of specific linters -linters-settings: - errcheck: - # report about not checking of errors in type assertions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: false - - # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; - # default is false: such cases aren't reported by default. 
- check-blank: false - - lll: - line-length: 100 - tab-width: 4 - - prealloc: - simple: false - range-loops: false - for-loops: false - - whitespace: - multi-if: false # Enforces newlines (or comments) after every multi-line if statement - multi-func: false # Enforces newlines (or comments) after every multi-line function signature - -linters: - enable: - - megacheck - - govet - disable: - - maligned - - prealloc - disable-all: false - presets: - - bugs - - unused - fast: false diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml deleted file mode 100644 index c1dbd5a3a3..0000000000 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go_import_path: github.com/sirupsen/logrus -git: - depth: 1 -env: - - GO111MODULE=on -go: 1.15.x -os: linux -install: - - ./travis/install.sh -script: - - cd ci - - go run mage.go -v -w ../ crossBuild - - go run mage.go -v -w ../ lint - - go run mage.go -v -w ../ test diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index 7567f61289..0000000000 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,259 +0,0 @@ -# 1.8.1 -Code quality: - * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer - * improve timestamp format documentation - -Fixes: - * fix race condition on logger hooks - - -# 1.8.0 - -Correct versioning number replacing v1.7.1. - -# 1.7.1 - -Beware this release has introduced a new public API and its semver is therefore incorrect. - -Code quality: - * use go 1.15 in travis - * use magefile as task runner - -Fixes: - * small fixes about new go 1.13 error formatting system - * Fix for a long-standing race condition with mutating data hooks - -Features: - * build support for zos - -# 1.7.0 -Fixes: - * the dependency toward a windows terminal library has been removed - -Features: - * a new buffer pool management API has been added - * a set of `Fn()` functions have been added - -# 1.6.0 -Fixes: - * end of line cleanup - * revert the entry concurrency bug fix which leads to deadlock under some circumstances - * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 - -Features: - * add an option to the `TextFormatter` to completely disable fields quoting - -# 1.5.0 -Code quality: - * add golangci linter run on travis - -Fixes: - * add mutex for hooks concurrent access on `Entry` data - * caller function field for go1.14 - * fix build issue for gopherjs target - -Feature: - * add a hooks/writer sub-package whose goal is to split output on different streams depending on the trace level - * add a `DisableHTMLEscape` option in the `JSONFormatter` - * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` - -# 1.4.2 - * Fixes build break for plan9, nacl, solaris -# 1.4.1 -This new release introduces: - * Enhance TextFormatter to not print caller information when it is empty (#944) - * Remove dependency on golang.org/x/crypto (#932, #943) - -Fixes: - * Fix Entry.WithContext method to return a copy of the initial entry (#941) - -# 1.4.0 -This new release introduces: - * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). 
* Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) - * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). - -Fixes: - * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). - * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) - * Fix infinite recursion on unknown `Level.String()` (#907) - * Fix race condition in `getCaller` (#916). - - -# 1.3.0 -This new release introduces: - * Log, Logf, Logln functions for Logger and Entry that take a Level - -Fixes: - * Building prometheus node_exporter on AIX (#840) - * Race condition in TextFormatter (#468) - * Travis CI import path (#868) - * Remove coloured output on Windows (#862) - * Pointer to func as field in JSONFormatter (#870) - * Properly marshal Levels (#873) - -# 1.2.0 -This new release introduces: - * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued - * A new trace level named `Trace` whose level is below `Debug` - * A configurable exit function to be called upon a Fatal trace - * The `Level` object now implements `encoding.TextUnmarshaler` interface - -# 1.1.1 -This is a bug fix release. - * fix the build break on Solaris - * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized - -# 1.1.0 -This new release introduces: - * several fixes: - * a fix for a race condition on entry formatting - * proper cleanup of previously used entries before putting them back in the pool - * the extra new line at the end of the message in text formatter has been removed - * a new global public API to check if a level is activated: IsLevelEnabled - * the following methods have been added to the Logger object - * IsLevelEnabled - * SetFormatter - * SetOutput - * ReplaceHooks - * introduction of go module - * an indent configuration for the json formatter - * output colour support for windows - * the field sort function is now configurable for text formatter - * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formatter - -# 1.0.6 - -This new release introduces: - * a new api WithTime which allows you to easily force the time of the log entry - which is mostly useful for logger wrappers - * a fix reverting the immutability of the entry given as parameter to the hooks - a new configuration field of the json formatter in order to put all the fields - in a nested dictionary - * a new SetOutput method in the Logger - * a new configuration of the textformatter to configure the name of the default keys - * a new configuration of the text formatter to disable the level truncation - -# 1.0.5 - -* Fix hooks race (#707) -* Fix panic deadlock (#695) - -# 1.0.4 - -* Fix race when adding hooks (#612) -* Fix terminal check in AppEngine (#635) - -# 1.0.3 - -* Replace example files with testable examples - -# 1.0.2 - -* bug: quote non-string values in text formatter (#583) -* Make (*Logger) SetLevel a public method - -# 1.0.1 - -* bug: fix escaping in text formatter (#575) - -# 1.0.0 - -* Officially changed name to lower-case -* bug: colors on Windows 10 (#541) -* bug: fix race in accessing level (#512) - -# 0.11.5 - -* feature: add writer and writerlevel to entry (#372) - -# 0.11.4 - -* bug: fix undefined variable on solaris (#493) - -# 0.11.3 - -* formatter: configure quoting of empty values (#484) -* formatter: configure quoting character (default is `"`) (#484) -* bug: fix not importing 
io correctly in non-linux environments (#481) - -# 0.11.2 - -* bug: fix windows terminal detection (#476) - -# 0.11.1 - -* bug: fix tty detection with custom out (#471) - -# 0.11.0 - -* performance: Use bufferpool to allocate (#370) -* terminal: terminal detection for app-engine (#343) -* feature: exit handler (#375) - -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42f3..0000000000 --- a/vendor/github.com/sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md deleted file mode 100644 index d1d4a85fd7..0000000000 --- a/vendor/github.com/sirupsen/logrus/README.md +++ /dev/null @@ -1,515 +0,0 @@ -# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. - -**Logrus is in maintenance-mode.** We will not be introducing new features. It's -simply too hard to do in a way that won't break many people's projects, which is -the last thing you want from your Logging library (again...). - -This does not mean Logrus is dead. Logrus will continue to be maintained for -security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). - -I believe Logrus' biggest contribution is to have played a part in today's -widespread use of structured logging in Golang. There doesn't seem to be a -reason to do a major, breaking iteration into Logrus V2, since the fantastic Go -community has built those independently. Many fantastic alternatives have sprung -up. Logrus would look like those, had it been re-designed with what we know -about structured logging in Go today. Check out, for example, -[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. - -[zerolog]: https://github.com/rs/zerolog -[zap]: https://github.com/uber-go/zap -[apex]: https://github.com/apex/log - -**Seeing weird case-sensitive problems?** It's in the past been possible to -import Logrus as both upper- and lower-case. Due to the Go package environment, -this caused issues in the community and we needed a standard. Some environments -experienced problems with the upper-case variant, so the lower-case was decided. -Everything using `logrus` will need to use the lower-case: -`github.com/sirupsen/logrus`. Any package that isn't, should be changed. - -To fix Glide, see [these -comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). -For an in-depth explanation of the casing issue, see [this -comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). 
- -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```text -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -``` -To ensure this behaviour even if a TTY is attached, set your formatter as follows: - -```go - log.SetFormatter(&log.TextFormatter{ - DisableColors: true, - FullTimestamp: true, - }) -``` - -#### Logging Method Name - -If you wish to add the calling method as a field, instruct the logger via: -```go -log.SetReportCaller(true) -``` -This adds the caller as 'method' like so: - -```json -{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", -"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} -``` - -```text -time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin -``` -Note that this does add measurable overhead - the cost will depend on the version of Go, but is -between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: -``` -go test -bench=.*CallerTracing -``` - - -#### Case-sensitivity - -The organization's name was changed to lower-case--and this will not be changed -back. If you are getting import conflicts due to case sensitivity, please use -the lower-case import: `github.com/sirupsen/logrus`. 
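 -As a brief aside on the caller logging above: the following is a minimal, illustrative sketch (not part of the original README) that pairs `SetReportCaller` with the `CallerPrettyfier` field on `JSONFormatter` to shorten what gets reported; the trimming scheme used here is just one plausible choice: -
```go
package main

import (
	"fmt"
	"path/filepath"
	"runtime"

	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetReportCaller(true)
	log.SetFormatter(&log.JSONFormatter{
		// Keep the fully qualified function name, but reduce the reported
		// file from an absolute path to "name.go:line".
		CallerPrettyfier: func(f *runtime.Frame) (string, string) {
			return f.Function, fmt.Sprintf("%s:%d", filepath.Base(f.File), f.Line)
		},
	})

	log.Info("caller information attached")
}
```
 -Per the formatter's own field documentation, returning an empty string from `CallerPrettyfier` removes the corresponding key from the JSON output entirely.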
 - -#### Example - -The simplest way to use Logrus is the package-level exported logger: - -```go -package main - -import ( - log "github.com/sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely API-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stdout instead of the default stderr - // Can be any io.Writer, see below for File example - log.SetOutput(os.Stdout) - - // Only log the warning severity or above. - log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - "github.com/sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stdout - - // You could set this to any `io.Writer` such as a file - // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) - // if err == nil { - // log.Out = file - // } else { - // log.Info("Failed to log to file, using default stderr") - // } - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d", event, topic, key)`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus, using any of the `printf`-family functions should be -seen as a hint that you should add a field; however, you can still use the -`printf`-family functions with Logrus.
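 -To make the contrast concrete, here is a small self-contained sketch (our illustration, with made-up `event`, `topic` and `key` values) logging the same failure both ways; `Error` is used instead of `Fatal` so that both statements actually run: -
```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	event, topic, key := "signup", "user-events", 42

	// printf-style: the details are baked into one opaque string.
	log.Errorf("Failed to send event %s to topic %s with key %d", event, topic, key)

	// field-style: each value is a separate key that log tooling can filter on.
	log.WithFields(log.Fields{
		"event": event,
		"topic": topic,
		"key":   key,
	}).Error("Failed to send event")
}
```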
 - -#### Default Fields - -Often it's helpful to have fields _always_ attached to log statements in an -application or parts of one. For example, you may want to always log the -`request_id` and `user_ip` in the context of a request. Instead of writing -`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on -every line, you can create a `logrus.Entry` to pass around instead: - -```go -requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) -requestLogger.Info("something happened on that request") // will log request_id and user_ip -requestLogger.Warn("something not great happened") -``` - -#### Hooks - -You can add hooks for logging levels. For example, to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD, or to log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" - logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: the syslog hook also supports connecting to a local syslog socket (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, see the [syslog hook README](hooks/syslog/README.md). - -A list of currently known service hooks can be found on this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks). - - -#### Level logging - -Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Trace("Something very low level.") -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`; it will then only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). - -#### Entries - -Besides the fields added with `WithField` or `WithFields`, some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `WithFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment.
 - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment, you -could do: - -```go -import ( - log "github.com/sirupsen/logrus" -) - -func init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY, set the - `DisableColors` field to `true`. For Windows, see - [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). - * When colors are enabled, levels are truncated to 4 characters by default. To disable - truncation, set the `DisableLevelTruncation` field to `true`. - * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior by adding padding to the level text. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). -* `logrus.JSONFormatter`. Logs fields as JSON. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). - -Third-party logging formatters: - -* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. -* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply with Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). -* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. -* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. -* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). Gets the file name, line number, and calling function name when printing logs; saves logs to files. -* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). A JSON formatter for logrus messages with a human-readable caption added. - -You can define your own formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`.
`entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` for information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) - } - return append(serialized, '\n'), nil -} - -// Register the formatter, e.g. from init or main: -func init() { - log.SetFormatter(new(MyJSONFormatter)) -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -This means that we can override the standard library logger easily: - -```go -logger := logrus.New() -logger.Formatter = &logrus.JSONFormatter{} - -// Use logrus for standard log output -// Note that `log` here references the stdlib's log package, -// not logrus imported under the name `log`. -log.SetOutput(logger.Writer()) -``` - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - -#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can initialize a logger's level, hook, and formatter from a config file, and generate loggers with different configs for different environments.| -|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps it with spf13/Viper to load configuration with fangs! To simplify Logrus configuration, it reuses some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | - -#### Testing - -Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: - -* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) that simply add the `test` hook -* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): - -```go -import ( - "github.com/sirupsen/logrus" - "github.com/sirupsen/logrus/hooks/test" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestSomething(t *testing.T) { - logger, hook := test.NewNullLogger() - logger.Error("Hello error") - - assert.Equal(t, 1, len(hook.Entries)) - assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) - assert.Equal(t, "Hello error", hook.LastEntry().Message) - - hook.Reset() - assert.Nil(t, hook.LastEntry()) -} -``` - -#### Fatal handlers - -Logrus can register one or more functions that will be called when any `fatal` -level message is logged. The registered handlers will be executed before -logrus performs an `os.Exit(1)`.
This behavior may be helpful if callers need -to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted. - -``` -... -handler := func() { - // gracefully shutdown something... -} -logrus.RegisterExitHandler(handler) -... -``` - -#### Thread safety - -By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. -If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. - -Situations where locking is not needed include: - -* You have no hooks registered, or hook calling is already thread-safe. - -* Writing to logger.Out is already thread-safe, for example: - - 1) logger.Out is protected by locks. - - 2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) - - (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go deleted file mode 100644 index 8fd189e1cc..0000000000 --- a/vendor/github.com/sirupsen/logrus/alt_exit.go +++ /dev/null @@ -1,76 +0,0 @@ -package logrus - -// The following code was sourced and modified from the -// https://github.com/tebeka/atexit package governed by the following license: -// -// Copyright (c) 2012 Miki Tebeka . -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software is furnished to do so, -// subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -import ( - "fmt" - "os" -) - -var handlers = []func(){} - -func runHandler(handler func()) { - defer func() { - if err := recover(); err != nil { - fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) - } - }() - - handler() -} - -func runHandlers() { - for _, handler := range handlers { - runHandler(handler) - } -} - -// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) -func Exit(code int) { - runHandlers() - os.Exit(code) -} - -// RegisterExitHandler appends a Logrus Exit handler to the list of handlers; -// call logrus.Exit to invoke all handlers. The handlers will also be invoked when -// any Fatal log entry is made. -// -// This method is useful when a caller wishes to use logrus to log a fatal -// message but also needs to shut down gracefully.
An example use case could be -// closing database connections, or sending an alert that the application is -// closing. -func RegisterExitHandler(handler func()) { - handlers = append(handlers, handler) -} - -// DeferExitHandler prepends a Logrus Exit handler to the list of handlers; -// call logrus.Exit to invoke all handlers. The handlers will also be invoked when -// any Fatal log entry is made. -// -// This method is useful when a caller wishes to use logrus to log a fatal -// message but also needs to shut down gracefully. An example use case could be -// closing database connections, or sending an alert that the application is -// closing. -func DeferExitHandler(handler func()) { - handlers = append([]func(){handler}, handlers...) -} diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml deleted file mode 100644 index df9d65c3a5..0000000000 --- a/vendor/github.com/sirupsen/logrus/appveyor.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: "{build}" -platform: x64 -clone_folder: c:\gopath\src\github.com\sirupsen\logrus -environment: - GOPATH: c:\gopath -branches: - only: - - master -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version -build_script: - - go get -t - - go test diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go deleted file mode 100644 index c7787f77cb..0000000000 --- a/vendor/github.com/sirupsen/logrus/buffer_pool.go +++ /dev/null @@ -1,43 +0,0 @@ -package logrus - -import ( - "bytes" - "sync" -) - -var ( - bufferPool BufferPool -) - -type BufferPool interface { - Put(*bytes.Buffer) - Get() *bytes.Buffer -} - -type defaultPool struct { - pool *sync.Pool -} - -func (p *defaultPool) Put(buf *bytes.Buffer) { - p.pool.Put(buf) -} - -func (p *defaultPool) Get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -// SetBufferPool allows replacing the default logrus buffer pool -// to better meet the specific needs of an application. -func SetBufferPool(bp BufferPool) { - bufferPool = bp -} - -func init() { - SetBufferPool(&defaultPool{ - pool: &sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, - }, - }) -} diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go deleted file mode 100644 index da67aba06d..0000000000 --- a/vendor/github.com/sirupsen/logrus/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
- - -The simplest way to use Logrus is simply the package-level exported logger: - - package main - - import ( - log "github.com/sirupsen/logrus" - ) - - func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "number": 1, - "size": 10, - }).Info("A walrus appears") - } - -Output: - time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 - -For a full guide visit https://github.com/sirupsen/logrus -*/ -package logrus diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go deleted file mode 100644 index 71cdbbc35d..0000000000 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ /dev/null @@ -1,442 +0,0 @@ -package logrus - -import ( - "bytes" - "context" - "fmt" - "os" - "reflect" - "runtime" - "strings" - "sync" - "time" -) - -var ( - - // qualified package name, cached at first use - logrusPackage string - - // Positions in the call stack when tracing to report the calling method - minimumCallerDepth int - - // Used for caller information initialisation - callerInitOnce sync.Once -) - -const ( - maximumCallerDepth int = 25 - knownLogrusFrames int = 4 -) - -func init() { - // start at the bottom of the stack before the package-name cache is primed - minimumCallerDepth = 1 -} - -// Defines the key when adding errors using WithError. -var ErrorKey = "error" - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, -// Info, Warn, Error, Fatal or Panic is called on it. These objects can be -// reused and passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic - // This field will be set on entry firing and the value will be equal to the one in Logger struct field. - Level Level - - // Calling method, with package name - Caller *runtime.Frame - - // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic - Message string - - // When formatter is called in entry.log(), a Buffer may be set to entry - Buffer *bytes.Buffer - - // Contains the context set by the user. Useful for hook processing etc. - Context context.Context - - // err may contain a field formatting error - err string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, plus one optional. Give a little extra room. - Data: make(Fields, 6), - } -} - -func (entry *Entry) Dup() *Entry { - data := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} -} - -// Returns the bytes representation of this entry from the formatter. -func (entry *Entry) Bytes() ([]byte, error) { - return entry.Logger.Formatter.Format(entry) -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - serialized, err := entry.Bytes() - if err != nil { - return "", err - } - str := string(serialized) - return str, nil -} - -// Add an error as single field (using the key defined in ErrorKey) to the Entry. 
-func (entry *Entry) WithError(err error) *Entry { - return entry.WithField(ErrorKey, err) -} - -// Add a context to the Entry. -func (entry *Entry) WithContext(ctx context.Context) *Entry { - dataCopy := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - dataCopy[k] = v - } - return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := make(Fields, len(entry.Data)+len(fields)) - for k, v := range entry.Data { - data[k] = v - } - fieldErr := entry.err - for k, v := range fields { - isErrField := false - if t := reflect.TypeOf(v); t != nil { - switch { - case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: - isErrField = true - } - } - if isErrField { - tmp := fmt.Sprintf("can not add field %q", k) - if fieldErr != "" { - fieldErr = entry.err + ", " + tmp - } else { - fieldErr = tmp - } - } else { - data[k] = v - } - } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} -} - -// Overrides the time of the Entry. -func (entry *Entry) WithTime(t time.Time) *Entry { - dataCopy := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - dataCopy[k] = v - } - return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} -} - -// getPackageName reduces a fully qualified function name to the package name -// There really ought to be to be a better way... -func getPackageName(f string) string { - for { - lastPeriod := strings.LastIndex(f, ".") - lastSlash := strings.LastIndex(f, "/") - if lastPeriod > lastSlash { - f = f[:lastPeriod] - } else { - break - } - } - - return f -} - -// getCaller retrieves the name of the first non-logrus calling function -func getCaller() *runtime.Frame { - // cache this package's fully-qualified name - callerInitOnce.Do(func() { - pcs := make([]uintptr, maximumCallerDepth) - _ = runtime.Callers(0, pcs) - - // dynamic get the package name and the minimum caller depth - for i := 0; i < maximumCallerDepth; i++ { - funcName := runtime.FuncForPC(pcs[i]).Name() - if strings.Contains(funcName, "getCaller") { - logrusPackage = getPackageName(funcName) - break - } - } - - minimumCallerDepth = knownLogrusFrames - }) - - // Restrict the lookback frames to avoid runaway lookups - pcs := make([]uintptr, maximumCallerDepth) - depth := runtime.Callers(minimumCallerDepth, pcs) - frames := runtime.CallersFrames(pcs[:depth]) - - for f, again := frames.Next(); again; f, again = frames.Next() { - pkg := getPackageName(f.Function) - - // If the caller isn't part of this package, we're done - if pkg != logrusPackage { - return &f //nolint:scopelint - } - } - - // if we got here, we failed to find the caller's context - return nil -} - -func (entry Entry) HasCaller() (has bool) { - return entry.Logger != nil && - entry.Logger.ReportCaller && - entry.Caller != nil -} - -func (entry *Entry) log(level Level, msg string) { - var buffer *bytes.Buffer - - newEntry := entry.Dup() - - if newEntry.Time.IsZero() { - newEntry.Time = time.Now() - } - - newEntry.Level = level - newEntry.Message = msg - - newEntry.Logger.mu.Lock() - reportCaller := newEntry.Logger.ReportCaller - bufPool := newEntry.getBufferPool() - newEntry.Logger.mu.Unlock() - - if 
reportCaller { - newEntry.Caller = getCaller() - } - - newEntry.fireHooks() - buffer = bufPool.Get() - defer func() { - newEntry.Buffer = nil - buffer.Reset() - bufPool.Put(buffer) - }() - buffer.Reset() - newEntry.Buffer = buffer - - newEntry.write() - - newEntry.Buffer = nil - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(newEntry) - } -} - -func (entry *Entry) getBufferPool() (pool BufferPool) { - if entry.Logger.BufferPool != nil { - return entry.Logger.BufferPool - } - return bufferPool -} - -func (entry *Entry) fireHooks() { - var tmpHooks LevelHooks - entry.Logger.mu.Lock() - tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) - for k, v := range entry.Logger.Hooks { - tmpHooks[k] = v - } - entry.Logger.mu.Unlock() - - err := tmpHooks.Fire(entry.Level, entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - } -} - -func (entry *Entry) write() { - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - serialized, err := entry.Logger.Formatter.Format(entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - return - } - if _, err := entry.Logger.Out.Write(serialized); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } -} - -// Log will log a message at the level given as parameter. -// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. -// For this behaviour Entry.Panic or Entry.Fatal should be used instead. -func (entry *Entry) Log(level Level, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.log(level, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Trace(args ...interface{}) { - entry.Log(TraceLevel, args...) -} - -func (entry *Entry) Debug(args ...interface{}) { - entry.Log(DebugLevel, args...) -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - entry.Log(InfoLevel, args...) -} - -func (entry *Entry) Warn(args ...interface{}) { - entry.Log(WarnLevel, args...) -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) -} - -func (entry *Entry) Error(args ...interface{}) { - entry.Log(ErrorLevel, args...) -} - -func (entry *Entry) Fatal(args ...interface{}) { - entry.Log(FatalLevel, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - entry.Log(PanicLevel, args...) -} - -// Entry Printf family functions - -func (entry *Entry) Logf(level Level, format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.Log(level, fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Tracef(format string, args ...interface{}) { - entry.Logf(TraceLevel, format, args...) -} - -func (entry *Entry) Debugf(format string, args ...interface{}) { - entry.Logf(DebugLevel, format, args...) -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - entry.Logf(InfoLevel, format, args...) -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - entry.Logf(WarnLevel, format, args...) -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - entry.Logf(ErrorLevel, format, args...) 
-} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - entry.Logf(FatalLevel, format, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - entry.Logf(PanicLevel, format, args...) -} - -// Entry Println family functions - -func (entry *Entry) Logln(level Level, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.Log(level, entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Traceln(args ...interface{}) { - entry.Logln(TraceLevel, args...) -} - -func (entry *Entry) Debugln(args ...interface{}) { - entry.Logln(DebugLevel, args...) -} - -func (entry *Entry) Infoln(args ...interface{}) { - entry.Logln(InfoLevel, args...) -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - entry.Logln(WarnLevel, args...) -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - entry.Logln(ErrorLevel, args...) -} - -func (entry *Entry) Fatalln(args ...interface{}) { - entry.Logln(FatalLevel, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - entry.Logln(PanicLevel, args...) -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) - return msg[:len(msg)-1] -} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go deleted file mode 100644 index 017c30ce67..0000000000 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ /dev/null @@ -1,270 +0,0 @@ -package logrus - -import ( - "context" - "io" - "time" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.SetOutput(out) -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.SetFormatter(formatter) -} - -// SetReportCaller sets whether the standard logger will include the calling -// method as a field. -func SetReportCaller(include bool) { - std.SetReportCaller(include) -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.SetLevel(level) -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - return std.GetLevel() -} - -// IsLevelEnabled checks if the log level of the standard logger is greater than the level param -func IsLevelEnabled(level Level) bool { - return std.IsLevelEnabled(level) -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.AddHook(hook) -} - -// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. -func WithError(err error) *Entry { - return std.WithField(ErrorKey, err) -} - -// WithContext creates an entry from the standard logger and adds a context to it. -func WithContext(ctx context.Context) *Entry { - return std.WithContext(ctx) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. 
-// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// WithTime creates an entry from the standard logger and overrides the time of -// logs generated with it. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithTime(t time.Time) *Entry { - return std.WithTime(t) -} - -// Trace logs a message at level Trace on the standard logger. -func Trace(args ...interface{}) { - std.Trace(args...) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// TraceFn logs a message from a func at level Trace on the standard logger. -func TraceFn(fn LogFunction) { - std.TraceFn(fn) -} - -// DebugFn logs a message from a func at level Debug on the standard logger. -func DebugFn(fn LogFunction) { - std.DebugFn(fn) -} - -// PrintFn logs a message from a func at level Info on the standard logger. -func PrintFn(fn LogFunction) { - std.PrintFn(fn) -} - -// InfoFn logs a message from a func at level Info on the standard logger. -func InfoFn(fn LogFunction) { - std.InfoFn(fn) -} - -// WarnFn logs a message from a func at level Warn on the standard logger. -func WarnFn(fn LogFunction) { - std.WarnFn(fn) -} - -// WarningFn logs a message from a func at level Warn on the standard logger. -func WarningFn(fn LogFunction) { - std.WarningFn(fn) -} - -// ErrorFn logs a message from a func at level Error on the standard logger. -func ErrorFn(fn LogFunction) { - std.ErrorFn(fn) -} - -// PanicFn logs a message from a func at level Panic on the standard logger. -func PanicFn(fn LogFunction) { - std.PanicFn(fn) -} - -// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. -func FatalFn(fn LogFunction) { - std.FatalFn(fn) -} - -// Tracef logs a message at level Trace on the standard logger. -func Tracef(format string, args ...interface{}) { - std.Tracef(format, args...) -} - -// Debugf logs a message at level Debug on the standard logger. 
-func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Traceln logs a message at level Trace on the standard logger. -func Traceln(args ...interface{}) { - std.Traceln(args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go deleted file mode 100644 index 408883773e..0000000000 --- a/vendor/github.com/sirupsen/logrus/formatter.go +++ /dev/null @@ -1,78 +0,0 @@ -package logrus - -import "time" - -// Default key names for the default fields -const ( - defaultTimestampFormat = time.RFC3339 - FieldKeyMsg = "msg" - FieldKeyLevel = "level" - FieldKeyTime = "time" - FieldKeyLogrusError = "logrus_error" - FieldKeyFunc = "func" - FieldKeyFile = "file" -) - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. 
-type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { - timeKey := fieldMap.resolve(FieldKeyTime) - if t, ok := data[timeKey]; ok { - data["fields."+timeKey] = t - delete(data, timeKey) - } - - msgKey := fieldMap.resolve(FieldKeyMsg) - if m, ok := data[msgKey]; ok { - data["fields."+msgKey] = m - delete(data, msgKey) - } - - levelKey := fieldMap.resolve(FieldKeyLevel) - if l, ok := data[levelKey]; ok { - data["fields."+levelKey] = l - delete(data, levelKey) - } - - logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) - if l, ok := data[logrusErrKey]; ok { - data["fields."+logrusErrKey] = l - delete(data, logrusErrKey) - } - - // If reportCaller is not set, 'func' will not conflict. - if reportCaller { - funcKey := fieldMap.resolve(FieldKeyFunc) - if l, ok := data[funcKey]; ok { - data["fields."+funcKey] = l - } - fileKey := fieldMap.resolve(FieldKeyFile) - if l, ok := data[fileKey]; ok { - data["fields."+fileKey] = l - } - } -} diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go deleted file mode 100644 index 3f151cdc39..0000000000 --- a/vendor/github.com/sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go deleted file mode 100644 index c96dc5636b..0000000000 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,128 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "fmt" - "runtime" -) - -type fieldKey string - -// FieldMap allows customization of the key names for default fields. 
-type FieldMap map[fieldKey]string - -func (f FieldMap) resolve(key fieldKey) string { - if k, ok := f[key]; ok { - return k - } - - return string(key) -} - -// JSONFormatter formats logs into parsable json -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. - // The format to use is the same than for time.Format or time.Parse from the standard - // library. - // The standard Library already provides a set of predefined format. - TimestampFormat string - - // DisableTimestamp allows disabling automatic timestamps in output - DisableTimestamp bool - - // DisableHTMLEscape allows disabling html escaping in output - DisableHTMLEscape bool - - // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. - DataKey string - - // FieldMap allows users to customize the names of keys for default fields. - // As an example: - // formatter := &JSONFormatter{ - // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", - // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message", - // FieldKeyFunc: "@caller", - // }, - // } - FieldMap FieldMap - - // CallerPrettyfier can be set by the user to modify the content - // of the function and file keys in the json data when ReportCaller is - // activated. If any of the returned value is the empty string the - // corresponding key will be removed from json fields. - CallerPrettyfier func(*runtime.Frame) (function string, file string) - - // PrettyPrint will indent all json logs - PrettyPrint bool -} - -// Format renders a single log entry -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+4) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - - if f.DataKey != "" { - newData := make(Fields, 4) - newData[f.DataKey] = data - data = newData - } - - prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - - if entry.err != "" { - data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err - } - if !f.DisableTimestamp { - data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) - } - data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message - data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() - if entry.HasCaller() { - funcVal := entry.Caller.Function - fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } - if funcVal != "" { - data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal - } - if fileVal != "" { - data[f.FieldMap.resolve(FieldKeyFile)] = fileVal - } - } - - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - encoder := json.NewEncoder(b) - encoder.SetEscapeHTML(!f.DisableHTMLEscape) - if f.PrettyPrint { - encoder.SetIndent("", " ") - } - if err := encoder.Encode(data); err != nil { - return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) - } - - return b.Bytes(), nil -} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go deleted file mode 100644 index 5ff0aef6d3..0000000000 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ /dev/null @@ -1,417 +0,0 @@ 
-package logrus - -import ( - "context" - "io" - "os" - "sync" - "sync/atomic" - "time" -) - -// LogFunction For big messages, it can be more efficient to pass a function -// and only call it if the log level is actually enabled, rather than -// generating the log message and then checking if the level is enabled -type LogFunction func() []interface{} - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventurous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks - // All log entries pass through the formatter before being logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - - // Flag for whether to log caller info (off by default) - ReportCaller bool - - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. - Level Level - // Used to sync writing to the log. Locking is enabled by default - mu MutexWrap - // Reusable empty entry - entryPool sync.Pool - // Function to exit the application, defaults to `os.Exit()` - ExitFunc exitFunc - // The buffer pool used to format the log. If it is nil, the default global - // buffer pool will be used. - BufferPool BufferPool -} - -type exitFunc func(int) - -type MutexWrap struct { - lock sync.Mutex - disabled bool -} - -func (mw *MutexWrap) Lock() { - if !mw.disabled { - mw.lock.Lock() - } -} - -func (mw *MutexWrap) Unlock() { - if !mw.disabled { - mw.lock.Unlock() - } -} - -func (mw *MutexWrap) Disable() { - mw.disabled = true -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &logrus.Logger{ -// Out: os.Stderr, -// Formatter: new(logrus.TextFormatter), -// Hooks: make(logrus.LevelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, - ExitFunc: os.Exit, - ReportCaller: false, - } -} - -func (logger *Logger) newEntry() *Entry { - entry, ok := logger.entryPool.Get().(*Entry) - if ok { - return entry - } - return NewEntry(logger) -} - -func (logger *Logger) releaseEntry(entry *Entry) { - entry.Data = map[string]interface{}{} - logger.entryPool.Put(entry) -} - -// WithField allocates a new entry and adds a field to it. -// Debug, Print, Info, Warn, Error, Fatal or Panic must then be applied to -// this new returned entry. -// If you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithField(key, value) -} - -// Adds a struct of fields to the log entry.
All it does is call `WithField` for
-// each `Field`.
-func (logger *Logger) WithFields(fields Fields) *Entry {
-	entry := logger.newEntry()
-	defer logger.releaseEntry(entry)
-	return entry.WithFields(fields)
-}
-
-// Add an error as a single field to the log entry. All it does is call
-// `WithError` for the given `error`.
-func (logger *Logger) WithError(err error) *Entry {
-	entry := logger.newEntry()
-	defer logger.releaseEntry(entry)
-	return entry.WithError(err)
-}
-
-// Add a context to the log entry.
-func (logger *Logger) WithContext(ctx context.Context) *Entry {
-	entry := logger.newEntry()
-	defer logger.releaseEntry(entry)
-	return entry.WithContext(ctx)
-}
-
-// Overrides the time of the log entry.
-func (logger *Logger) WithTime(t time.Time) *Entry {
-	entry := logger.newEntry()
-	defer logger.releaseEntry(entry)
-	return entry.WithTime(t)
-}
-
-func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
-	if logger.IsLevelEnabled(level) {
-		entry := logger.newEntry()
-		entry.Logf(level, format, args...)
-		logger.releaseEntry(entry)
-	}
-}
-
-func (logger *Logger) Tracef(format string, args ...interface{}) {
-	logger.Logf(TraceLevel, format, args...)
-}
-
-func (logger *Logger) Debugf(format string, args ...interface{}) {
-	logger.Logf(DebugLevel, format, args...)
-}
-
-func (logger *Logger) Infof(format string, args ...interface{}) {
-	logger.Logf(InfoLevel, format, args...)
-}
-
-func (logger *Logger) Printf(format string, args ...interface{}) {
-	entry := logger.newEntry()
-	entry.Printf(format, args...)
-	logger.releaseEntry(entry)
-}
-
-func (logger *Logger) Warnf(format string, args ...interface{}) {
-	logger.Logf(WarnLevel, format, args...)
-}
-
-func (logger *Logger) Warningf(format string, args ...interface{}) {
-	logger.Warnf(format, args...)
-}
-
-func (logger *Logger) Errorf(format string, args ...interface{}) {
-	logger.Logf(ErrorLevel, format, args...)
-}
-
-func (logger *Logger) Fatalf(format string, args ...interface{}) {
-	logger.Logf(FatalLevel, format, args...)
-	logger.Exit(1)
-}
-
-func (logger *Logger) Panicf(format string, args ...interface{}) {
-	logger.Logf(PanicLevel, format, args...)
-}
-
-// Log will log a message at the level given as parameter.
-// Warning: using Log at Panic or Fatal level will neither panic nor exit.
-// For this behaviour Logger.Panic or Logger.Fatal should be used instead.
-func (logger *Logger) Log(level Level, args ...interface{}) {
-	if logger.IsLevelEnabled(level) {
-		entry := logger.newEntry()
-		entry.Log(level, args...)
-		logger.releaseEntry(entry)
-	}
-}
-
-func (logger *Logger) LogFn(level Level, fn LogFunction) {
-	if logger.IsLevelEnabled(level) {
-		entry := logger.newEntry()
-		entry.Log(level, fn()...)
-		logger.releaseEntry(entry)
-	}
-}
-
-func (logger *Logger) Trace(args ...interface{}) {
-	logger.Log(TraceLevel, args...)
-}
-
-func (logger *Logger) Debug(args ...interface{}) {
-	logger.Log(DebugLevel, args...)
-}
-
-func (logger *Logger) Info(args ...interface{}) {
-	logger.Log(InfoLevel, args...)
-}
-
-func (logger *Logger) Print(args ...interface{}) {
-	entry := logger.newEntry()
-	entry.Print(args...)
-	logger.releaseEntry(entry)
-}
-
-func (logger *Logger) Warn(args ...interface{}) {
-	logger.Log(WarnLevel, args...)
-}
-
-func (logger *Logger) Warning(args ...interface{}) {
-	logger.Warn(args...)
-}
-
-func (logger *Logger) Error(args ...interface{}) {
-	logger.Log(ErrorLevel, args...)
-}
-
-func (logger *Logger) Fatal(args ...interface{}) {
-	logger.Log(FatalLevel, args...)
-	logger.Exit(1)
-}
-
-func (logger *Logger) Panic(args ...interface{}) {
-	logger.Log(PanicLevel, args...)
-}
-
-func (logger *Logger) TraceFn(fn LogFunction) {
-	logger.LogFn(TraceLevel, fn)
-}
-
-func (logger *Logger) DebugFn(fn LogFunction) {
-	logger.LogFn(DebugLevel, fn)
-}
-
-func (logger *Logger) InfoFn(fn LogFunction) {
-	logger.LogFn(InfoLevel, fn)
-}
-
-func (logger *Logger) PrintFn(fn LogFunction) {
-	entry := logger.newEntry()
-	entry.Print(fn()...)
-	logger.releaseEntry(entry)
-}
-
-func (logger *Logger) WarnFn(fn LogFunction) {
-	logger.LogFn(WarnLevel, fn)
-}
-
-func (logger *Logger) WarningFn(fn LogFunction) {
-	logger.WarnFn(fn)
-}
-
-func (logger *Logger) ErrorFn(fn LogFunction) {
-	logger.LogFn(ErrorLevel, fn)
-}
-
-func (logger *Logger) FatalFn(fn LogFunction) {
-	logger.LogFn(FatalLevel, fn)
-	logger.Exit(1)
-}
-
-func (logger *Logger) PanicFn(fn LogFunction) {
-	logger.LogFn(PanicLevel, fn)
-}
-
-func (logger *Logger) Logln(level Level, args ...interface{}) {
-	if logger.IsLevelEnabled(level) {
-		entry := logger.newEntry()
-		entry.Logln(level, args...)
-		logger.releaseEntry(entry)
-	}
-}
-
-func (logger *Logger) Traceln(args ...interface{}) {
-	logger.Logln(TraceLevel, args...)
-}
-
-func (logger *Logger) Debugln(args ...interface{}) {
-	logger.Logln(DebugLevel, args...)
-}
-
-func (logger *Logger) Infoln(args ...interface{}) {
-	logger.Logln(InfoLevel, args...)
-}
-
-func (logger *Logger) Println(args ...interface{}) {
-	entry := logger.newEntry()
-	entry.Println(args...)
-	logger.releaseEntry(entry)
-}
-
-func (logger *Logger) Warnln(args ...interface{}) {
-	logger.Logln(WarnLevel, args...)
-}
-
-func (logger *Logger) Warningln(args ...interface{}) {
-	logger.Warnln(args...)
-}
-
-func (logger *Logger) Errorln(args ...interface{}) {
-	logger.Logln(ErrorLevel, args...)
-}
-
-func (logger *Logger) Fatalln(args ...interface{}) {
-	logger.Logln(FatalLevel, args...)
-	logger.Exit(1)
-}
-
-func (logger *Logger) Panicln(args ...interface{}) {
-	logger.Logln(PanicLevel, args...)
-}
-
-func (logger *Logger) Exit(code int) {
-	runHandlers()
-	if logger.ExitFunc == nil {
-		logger.ExitFunc = os.Exit
-	}
-	logger.ExitFunc(code)
-}
-
-// When a file is opened in append mode, it's safe to write to it
-// concurrently (for messages under 4k on Linux). In these cases the user
-// can choose to disable the lock.
-func (logger *Logger) SetNoLock() {
-	logger.mu.Disable()
-}
-
-func (logger *Logger) level() Level {
-	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
-}
-
-// SetLevel sets the logger level.
-func (logger *Logger) SetLevel(level Level) {
-	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
-}
-
-// GetLevel returns the logger level.
-func (logger *Logger) GetLevel() Level {
-	return logger.level()
-}
-
-// AddHook adds a hook to the logger hooks.
-func (logger *Logger) AddHook(hook Hook) {
-	logger.mu.Lock()
-	defer logger.mu.Unlock()
-	logger.Hooks.Add(hook)
-}
-
-// IsLevelEnabled checks if the log level of the logger is greater than or
-// equal to the level param
-func (logger *Logger) IsLevelEnabled(level Level) bool {
-	return logger.level() >= level
-}
-
-// SetFormatter sets the logger formatter.
-func (logger *Logger) SetFormatter(formatter Formatter) {
-	logger.mu.Lock()
-	defer logger.mu.Unlock()
-	logger.Formatter = formatter
-}
-
-// SetOutput sets the logger output.
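A short usage sketch of the level machinery above: `SetLevel` stores the level atomically, every call is gated by `IsLevelEnabled`, and the `*Fn` variants defer building the message until the level is known to be enabled. `buildHugeDump` is a hypothetical stand-in for an expensive payload:

```go
package main

import "github.com/sirupsen/logrus"

// buildHugeDump stands in for some expensive-to-build diagnostic payload.
func buildHugeDump() string { return "...big dump..." }

func main() {
	logger := logrus.New()
	logger.SetLevel(logrus.WarnLevel)

	logger.Info("dropped") // below WarnLevel, filtered by IsLevelEnabled
	logger.Warn("emitted")

	// The closure never runs while debug logging is disabled, so the
	// expensive dump is never built.
	logger.DebugFn(func() []interface{} {
		return []interface{}{"state:", buildHugeDump()}
	})
}
```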
-func (logger *Logger) SetOutput(output io.Writer) {
-	logger.mu.Lock()
-	defer logger.mu.Unlock()
-	logger.Out = output
-}
-
-func (logger *Logger) SetReportCaller(reportCaller bool) {
-	logger.mu.Lock()
-	defer logger.mu.Unlock()
-	logger.ReportCaller = reportCaller
-}
-
-// ReplaceHooks replaces the logger hooks and returns the old ones
-func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
-	logger.mu.Lock()
-	oldHooks := logger.Hooks
-	logger.Hooks = hooks
-	logger.mu.Unlock()
-	return oldHooks
-}
-
-// SetBufferPool sets the logger buffer pool.
-func (logger *Logger) SetBufferPool(pool BufferPool) {
-	logger.mu.Lock()
-	defer logger.mu.Unlock()
-	logger.BufferPool = pool
-}
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
deleted file mode 100644
index 2f16224cb9..0000000000
--- a/vendor/github.com/sirupsen/logrus/logrus.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package logrus
-
-import (
-	"fmt"
-	"log"
-	"strings"
-)
-
-// Fields type, used to pass to `WithFields`.
-type Fields map[string]interface{}
-
-// Level type
-type Level uint32
-
-// Convert the Level to a string. E.g. PanicLevel becomes "panic".
-func (level Level) String() string {
-	if b, err := level.MarshalText(); err == nil {
-		return string(b)
-	} else {
-		return "unknown"
-	}
-}
-
-// ParseLevel takes a string level and returns the Logrus log level constant.
-func ParseLevel(lvl string) (Level, error) {
-	switch strings.ToLower(lvl) {
-	case "panic":
-		return PanicLevel, nil
-	case "fatal":
-		return FatalLevel, nil
-	case "error":
-		return ErrorLevel, nil
-	case "warn", "warning":
-		return WarnLevel, nil
-	case "info":
-		return InfoLevel, nil
-	case "debug":
-		return DebugLevel, nil
-	case "trace":
-		return TraceLevel, nil
-	}
-
-	var l Level
-	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
-}
-
-// UnmarshalText implements encoding.TextUnmarshaler.
-func (level *Level) UnmarshalText(text []byte) error {
-	l, err := ParseLevel(string(text))
-	if err != nil {
-		return err
-	}
-
-	*level = l
-
-	return nil
-}
-
-func (level Level) MarshalText() ([]byte, error) {
-	switch level {
-	case TraceLevel:
-		return []byte("trace"), nil
-	case DebugLevel:
-		return []byte("debug"), nil
-	case InfoLevel:
-		return []byte("info"), nil
-	case WarnLevel:
-		return []byte("warning"), nil
-	case ErrorLevel:
-		return []byte("error"), nil
-	case FatalLevel:
-		return []byte("fatal"), nil
-	case PanicLevel:
-		return []byte("panic"), nil
-	}
-
-	return nil, fmt.Errorf("not a valid logrus level %d", level)
-}
-
-// AllLevels exposes all logging levels
-var AllLevels = []Level{
-	PanicLevel,
-	FatalLevel,
-	ErrorLevel,
-	WarnLevel,
-	InfoLevel,
-	DebugLevel,
-	TraceLevel,
-}
-
-// These are the different logging levels. You can set the logging level to log
-// on your instance of logger, obtained with `logrus.New()`.
-const (
-	// PanicLevel level, highest level of severity. Logs and then calls panic with the
-	// message passed to Debug, Info, ...
-	PanicLevel Level = iota
-	// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
-	// logging level is set to Panic.
-	FatalLevel
-	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
-	// Commonly used for hooks to send errors to an error tracking service.
-	ErrorLevel
-	// WarnLevel level. Non-critical entries that deserve eyes.
-	WarnLevel
-	// InfoLevel level. General operational entries about what's going on inside the
-	// application.
-	InfoLevel
-	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
-	DebugLevel
-	// TraceLevel level. Designates finer-grained informational events than the Debug.
-	TraceLevel
-)
-
-// Won't compile if StdLogger can't be realized by a log.Logger
-var (
-	_ StdLogger = &log.Logger{}
-	_ StdLogger = &Entry{}
-	_ StdLogger = &Logger{}
-)
-
-// StdLogger is what your logrus-enabled library should take, that way
-// it'll accept a stdlib logger and a logrus logger. There's no standard
-// interface; this is the closest we get, unfortunately.
-type StdLogger interface {
-	Print(...interface{})
-	Printf(string, ...interface{})
-	Println(...interface{})
-
-	Fatal(...interface{})
-	Fatalf(string, ...interface{})
-	Fatalln(...interface{})
-
-	Panic(...interface{})
-	Panicf(string, ...interface{})
-	Panicln(...interface{})
-}
-
-// The FieldLogger interface generalizes the Entry and Logger types
-type FieldLogger interface {
-	WithField(key string, value interface{}) *Entry
-	WithFields(fields Fields) *Entry
-	WithError(err error) *Entry
-
-	Debugf(format string, args ...interface{})
-	Infof(format string, args ...interface{})
-	Printf(format string, args ...interface{})
-	Warnf(format string, args ...interface{})
-	Warningf(format string, args ...interface{})
-	Errorf(format string, args ...interface{})
-	Fatalf(format string, args ...interface{})
-	Panicf(format string, args ...interface{})
-
-	Debug(args ...interface{})
-	Info(args ...interface{})
-	Print(args ...interface{})
-	Warn(args ...interface{})
-	Warning(args ...interface{})
-	Error(args ...interface{})
-	Fatal(args ...interface{})
-	Panic(args ...interface{})
-
-	Debugln(args ...interface{})
-	Infoln(args ...interface{})
-	Println(args ...interface{})
-	Warnln(args ...interface{})
-	Warningln(args ...interface{})
-	Errorln(args ...interface{})
-	Fatalln(args ...interface{})
-	Panicln(args ...interface{})
-
-	// IsDebugEnabled() bool
-	// IsInfoEnabled() bool
-	// IsWarnEnabled() bool
-	// IsErrorEnabled() bool
-	// IsFatalEnabled() bool
-	// IsPanicEnabled() bool
-}
-
-// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
-// here for consistency. Do not use. Use Logger or Entry instead.
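Because FieldLogger is satisfied by both *Logger and *Entry, library code can accept the interface and let callers pass either one, scoped or not. A minimal sketch; `processJob` and its arguments are illustrative:

```go
package main

import "github.com/sirupsen/logrus"

// processJob depends only on the FieldLogger interface, so callers can pass
// a *logrus.Logger or an already-scoped *logrus.Entry interchangeably.
func processJob(log logrus.FieldLogger, id string) {
	log.WithField("job_id", id).Info("job started")
}

func main() {
	base := logrus.New()
	processJob(base, "42")                           // *Logger satisfies FieldLogger
	processJob(base.WithField("worker", "w1"), "43") // and so does *Entry
}
```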
-type Ext1FieldLogger interface { - FieldLogger - Tracef(format string, args ...interface{}) - Trace(args ...interface{}) - Traceln(args ...interface{}) -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go deleted file mode 100644 index 2403de9819..0000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine - -package logrus - -import ( - "io" -) - -func checkIfTerminal(w io.Writer) bool { - return true -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go deleted file mode 100644 index 499789984d..0000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build darwin dragonfly freebsd netbsd openbsd -// +build !js - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TIOCGETA - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - return err == nil -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go deleted file mode 100644 index ebdae3ec62..0000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_js.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build js - -package logrus - -func isTerminal(fd int) bool { - return false -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go deleted file mode 100644 index 97af92c68e..0000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build js nacl plan9 - -package logrus - -import ( - "io" -) - -func checkIfTerminal(w io.Writer) bool { - return false -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go deleted file mode 100644 index 3293fb3caa..0000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !appengine,!js,!windows,!nacl,!plan9 - -package logrus - -import ( - "io" - "os" -) - -func checkIfTerminal(w io.Writer) bool { - switch v := w.(type) { - case *os.File: - return isTerminal(int(v.Fd())) - default: - return false - } -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go deleted file mode 100644 index f6710b3bd0..0000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package logrus - -import ( - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. 
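These build-tagged files select a platform-specific isTerminal at compile time. Outside logrus internals, the same TTY check is commonly done with golang.org/x/term; a sketch, assuming that package is in the module graph:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	// Rough equivalent of logrus's checkIfTerminal for stdout:
	// only emit colors when the writer really is a TTY.
	if term.IsTerminal(int(os.Stdout.Fd())) {
		fmt.Println("stdout is a terminal; colors are safe")
	} else {
		fmt.Println("stdout is redirected; use plain output")
	}
}
```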
-func isTerminal(fd int) bool {
-	_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
-	return err == nil
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
deleted file mode 100644
index 04748b8515..0000000000
--- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build linux aix zos
-// +build !js
-
-package logrus
-
-import "golang.org/x/sys/unix"
-
-const ioctlReadTermios = unix.TCGETS
-
-func isTerminal(fd int) bool {
-	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
-	return err == nil
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
deleted file mode 100644
index 2879eb50ea..0000000000
--- a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build !appengine,!js,windows
-
-package logrus
-
-import (
-	"io"
-	"os"
-
-	"golang.org/x/sys/windows"
-)
-
-func checkIfTerminal(w io.Writer) bool {
-	switch v := w.(type) {
-	case *os.File:
-		handle := windows.Handle(v.Fd())
-		var mode uint32
-		if err := windows.GetConsoleMode(handle, &mode); err != nil {
-			return false
-		}
-		mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
-		if err := windows.SetConsoleMode(handle, mode); err != nil {
-			return false
-		}
-		return true
-	}
-	return false
-}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
deleted file mode 100644
index be2c6efe5e..0000000000
--- a/vendor/github.com/sirupsen/logrus/text_formatter.go
+++ /dev/null
@@ -1,339 +0,0 @@
-package logrus
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-	"unicode/utf8"
-)
-
-const (
-	red    = 31
-	yellow = 33
-	blue   = 36
-	gray   = 37
-)
-
-var baseTimestamp time.Time
-
-func init() {
-	baseTimestamp = time.Now()
-}
-
-// TextFormatter formats logs into text
-type TextFormatter struct {
-	// Set to true to bypass checking for a TTY before outputting colors.
-	ForceColors bool
-
-	// Force disabling colors.
-	DisableColors bool
-
-	// Force quoting of all values
-	ForceQuote bool
-
-	// DisableQuote disables quoting for all values.
-	// DisableQuote will have a lower priority than ForceQuote.
-	// If both of them are set to true, quote will be forced on all values.
-	DisableQuote bool
-
-	// Override coloring based on CLICOLOR and CLICOLOR_FORCE:
-	// https://bixense.com/clicolors/
-	EnvironmentOverrideColors bool
-
-	// Disable timestamp logging. Useful when output is redirected to a logging
-	// system that already adds timestamps.
-	DisableTimestamp bool
-
-	// Enable logging the full timestamp when a TTY is attached instead of just
-	// the time passed since beginning of execution.
-	FullTimestamp bool
-
-	// TimestampFormat to use for display when a full timestamp is printed.
-	// The format to use is the same as for time.Format or time.Parse from the standard
-	// library.
-	// The standard library already provides a set of predefined formats.
-	TimestampFormat string
-
-	// The fields are sorted by default for a consistent output. For applications
-	// that log extremely frequently and don't use the JSON formatter this may not
-	// be desired.
-	DisableSorting bool
-
-	// The keys sorting function; when uninitialized it uses sort.Strings.
-	SortingFunc func([]string)
-
-	// Disables the truncation of the level text to 4 characters.
-	DisableLevelTruncation bool
-
-	// PadLevelText adds padding to the level text so that all levels output at the same length.
-	// PadLevelText is a superset of the DisableLevelTruncation option.
-	PadLevelText bool
-
-	// QuoteEmptyFields will wrap empty fields in quotes if true
-	QuoteEmptyFields bool
-
-	// Whether the logger's out is to a terminal
-	isTerminal bool
-
-	// FieldMap allows users to customize the names of keys for default fields.
-	// As an example:
-	// formatter := &TextFormatter{
-	//     FieldMap: FieldMap{
-	//         FieldKeyTime:  "@timestamp",
-	//         FieldKeyLevel: "@level",
-	//         FieldKeyMsg:   "@message"}}
-	FieldMap FieldMap
-
-	// CallerPrettyfier can be set by the user to modify the content
-	// of the function and file keys in the data when ReportCaller is
-	// activated. If any of the returned values is the empty string the
-	// corresponding key will be removed from fields.
-	CallerPrettyfier func(*runtime.Frame) (function string, file string)
-
-	terminalInitOnce sync.Once
-
-	// The max length of the level text, generated dynamically on init
-	levelTextMaxLength int
-}
-
-func (f *TextFormatter) init(entry *Entry) {
-	if entry.Logger != nil {
-		f.isTerminal = checkIfTerminal(entry.Logger.Out)
-	}
-	// Get the max length of the level text
-	for _, level := range AllLevels {
-		levelTextLength := utf8.RuneCount([]byte(level.String()))
-		if levelTextLength > f.levelTextMaxLength {
-			f.levelTextMaxLength = levelTextLength
-		}
-	}
-}
-
-func (f *TextFormatter) isColored() bool {
-	isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
-
-	if f.EnvironmentOverrideColors {
-		switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); {
-		case ok && force != "0":
-			isColored = true
-		case ok && force == "0", os.Getenv("CLICOLOR") == "0":
-			isColored = false
-		}
-	}
-
-	return isColored && !f.DisableColors
}
-
-// Format renders a single log entry
-func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
-	data := make(Fields)
-	for k, v := range entry.Data {
-		data[k] = v
-	}
-	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
-	keys := make([]string, 0, len(data))
-	for k := range data {
-		keys = append(keys, k)
-	}
-
-	var funcVal, fileVal string
-
-	fixedKeys := make([]string, 0, 4+len(data))
-	if !f.DisableTimestamp {
-		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
-	}
-	fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
-	if entry.Message != "" {
-		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
-	}
-	if entry.err != "" {
-		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
-	}
-	if entry.HasCaller() {
-		if f.CallerPrettyfier != nil {
-			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
-		} else {
-			funcVal = entry.Caller.Function
-			fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
-		}
-
-		if funcVal != "" {
-			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
-		}
-		if fileVal != "" {
-			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
-		}
-	}
-
-	if !f.DisableSorting {
-		if f.SortingFunc == nil {
-			sort.Strings(keys)
-			fixedKeys = append(fixedKeys, keys...)
-		} else {
-			if !f.isColored() {
-				fixedKeys = append(fixedKeys, keys...)
-				f.SortingFunc(fixedKeys)
-			} else {
-				f.SortingFunc(keys)
-			}
-		}
-	} else {
-		fixedKeys = append(fixedKeys, keys...)
- } - - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - f.terminalInitOnce.Do(func() { f.init(entry) }) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - if f.isColored() { - f.printColored(b, entry, keys, data, timestampFormat) - } else { - - for _, key := range fixedKeys { - var value interface{} - switch { - case key == f.FieldMap.resolve(FieldKeyTime): - value = entry.Time.Format(timestampFormat) - case key == f.FieldMap.resolve(FieldKeyLevel): - value = entry.Level.String() - case key == f.FieldMap.resolve(FieldKeyMsg): - value = entry.Message - case key == f.FieldMap.resolve(FieldKeyLogrusError): - value = entry.err - case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): - value = funcVal - case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): - value = fileVal - default: - value = data[key] - } - f.appendKeyValue(b, key, value) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel, TraceLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - case InfoLevel: - levelColor = blue - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String()) - if !f.DisableLevelTruncation && !f.PadLevelText { - levelText = levelText[0:4] - } - if f.PadLevelText { - // Generates the format string used in the next line, for example "%-6s" or "%-7s". - // Based on the max level text length. - formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" - // Formats the level text by appending spaces up to the max length, for example: - // - "INFO " - // - "WARNING" - levelText = fmt.Sprintf(formatString, levelText) - } - - // Remove a single newline if it already exists in the message to keep - // the behavior of logrus text_formatter the same as the stdlib log package - entry.Message = strings.TrimSuffix(entry.Message, "\n") - - caller := "" - if entry.HasCaller() { - funcVal := fmt.Sprintf("%s()", entry.Caller.Function) - fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } - - if fileVal == "" { - caller = funcVal - } else if funcVal == "" { - caller = fileVal - } else { - caller = fileVal + " " + funcVal - } - } - - switch { - case f.DisableTimestamp: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) - case !f.FullTimestamp: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) - default: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) - } - for _, k := range keys { - v := data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) - f.appendValue(b, v) - } -} - -func (f *TextFormatter) needsQuoting(text string) bool { - if f.ForceQuote { - return true - } - if f.QuoteEmptyFields && len(text) == 0 { - return true - } - if f.DisableQuote { - return false - } - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
-			return true
-		}
-	}
-	return false
-}
-
-func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
-	if b.Len() > 0 {
-		b.WriteByte(' ')
-	}
-	b.WriteString(key)
-	b.WriteByte('=')
-	f.appendValue(b, value)
-}
-
-func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
-	stringVal, ok := value.(string)
-	if !ok {
-		stringVal = fmt.Sprint(value)
-	}
-
-	if !f.needsQuoting(stringVal) {
-		b.WriteString(stringVal)
-	} else {
-		b.WriteString(fmt.Sprintf("%q", stringVal))
-	}
-}
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
deleted file mode 100644
index 074fd4b8bd..0000000000
--- a/vendor/github.com/sirupsen/logrus/writer.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package logrus
-
-import (
-	"bufio"
-	"io"
-	"runtime"
-	"strings"
-)
-
-// Writer at INFO level. See WriterLevel for details.
-func (logger *Logger) Writer() *io.PipeWriter {
-	return logger.WriterLevel(InfoLevel)
-}
-
-// WriterLevel returns an io.Writer that can be used to write arbitrary text to
-// the logger at the given log level. Each line written to the writer will be
-// printed in the usual way using formatters and hooks. The writer is part of an
-// io.Pipe and it is the caller's responsibility to close the writer when done.
-// This can be used to override the standard library logger easily.
-func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
-	return NewEntry(logger).WriterLevel(level)
-}
-
-// Writer returns an io.Writer that writes to the logger at the info log level
-func (entry *Entry) Writer() *io.PipeWriter {
-	return entry.WriterLevel(InfoLevel)
-}
-
-// WriterLevel returns an io.Writer that writes to the logger at the given log level
-func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
-	reader, writer := io.Pipe()
-
-	var printFunc func(args ...interface{})
-
-	// Determine which log function to use based on the specified log level
-	switch level {
-	case TraceLevel:
-		printFunc = entry.Trace
-	case DebugLevel:
-		printFunc = entry.Debug
-	case InfoLevel:
-		printFunc = entry.Info
-	case WarnLevel:
-		printFunc = entry.Warn
-	case ErrorLevel:
-		printFunc = entry.Error
-	case FatalLevel:
-		printFunc = entry.Fatal
-	case PanicLevel:
-		printFunc = entry.Panic
-	default:
-		printFunc = entry.Print
-	}
-
-	// Start a new goroutine to scan the input and write it to the logger using the specified print function.
-	// It splits the input into chunks of up to 64KB to avoid buffer overflows.
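As the doc comment above notes, the pipe-based writer makes it easy to point the standard library logger at logrus. A minimal sketch; closing the returned writer is the caller's responsibility:

```go
package main

import (
	stdlog "log"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	w := logger.WriterLevel(logrus.WarnLevel) // *io.PipeWriter feeding the logger
	defer w.Close()                           // the caller must close the pipe side

	// Every line the stdlib logger prints is re-logged by logrus at Warn.
	legacy := stdlog.New(w, "", 0)
	legacy.Println("legacy code path hit")
}
```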
-	go entry.writerScanner(reader, printFunc)
-
-	// Set a finalizer function to close the writer when it is garbage collected
-	runtime.SetFinalizer(writer, writerFinalizer)
-
-	return writer
-}
-
-// writerScanner scans the input from the reader and writes it to the logger
-func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
-	scanner := bufio.NewScanner(reader)
-
-	// Set the buffer size to the maximum token size to avoid buffer overflows
-	scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)
-
-	// Define a split function to split the input into chunks of up to 64KB
-	chunkSize := bufio.MaxScanTokenSize // 64KB
-	splitFunc := func(data []byte, atEOF bool) (int, []byte, error) {
-		if len(data) >= chunkSize {
-			return chunkSize, data[:chunkSize], nil
-		}
-
-		return bufio.ScanLines(data, atEOF)
-	}
-
-	// Use the custom split function to split the input
-	scanner.Split(splitFunc)
-
-	// Scan the input and write it to the logger using the specified print function
-	for scanner.Scan() {
-		printFunc(strings.TrimRight(scanner.Text(), "\r\n"))
-	}
-
-	// If there was an error while scanning the input, log an error
-	if err := scanner.Err(); err != nil {
-		entry.Errorf("Error while reading from Writer: %s", err)
-	}
-
-	// Close the reader when we are done
-	reader.Close()
-}
-
-// writerFinalizer is a finalizer function that closes the given writer when it is garbage collected
-func writerFinalizer(writer *io.PipeWriter) {
-	writer.Close()
-}
diff --git a/vendor/github.com/sourcegraph/conc/.golangci.yml b/vendor/github.com/sourcegraph/conc/.golangci.yml
deleted file mode 100644
index ae65a760a9..0000000000
--- a/vendor/github.com/sourcegraph/conc/.golangci.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-linters:
-  disable-all: true
-  enable:
-    - errcheck
-    - godot
-    - gosimple
-    - govet
-    - ineffassign
-    - staticcheck
-    - typecheck
-    - unused
diff --git a/vendor/github.com/sourcegraph/conc/LICENSE b/vendor/github.com/sourcegraph/conc/LICENSE
deleted file mode 100644
index 1081f4ef4a..0000000000
--- a/vendor/github.com/sourcegraph/conc/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2023 Sourcegraph
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
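And a sketch of the common TextFormatter knobs from the file removed above; the values are illustrative, and note that per its own comment PadLevelText already implies untruncated level text:

```go
package main

import log "github.com/sirupsen/logrus"

func main() {
	log.SetFormatter(&log.TextFormatter{
		ForceColors:      true,           // color even when the output is not a TTY
		FullTimestamp:    true,           // absolute time instead of seconds since start
		TimestampFormat:  "15:04:05.000", // time.Format layout
		PadLevelText:     true,           // pad levels to equal width (implies no truncation)
		QuoteEmptyFields: true,           // render empty values as ""
	})
	log.Warn("colored, padded, fully timestamped") // message illustrative
}
```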
diff --git a/vendor/github.com/sourcegraph/conc/README.md b/vendor/github.com/sourcegraph/conc/README.md deleted file mode 100644 index 1c87c3c969..0000000000 --- a/vendor/github.com/sourcegraph/conc/README.md +++ /dev/null @@ -1,464 +0,0 @@ -![conch](https://user-images.githubusercontent.com/12631702/210295964-785cc63d-d697-420c-99ff-f492eb81dec9.svg) - -# `conc`: better structured concurrency for go - -[![Go Reference](https://pkg.go.dev/badge/github.com/sourcegraph/conc.svg)](https://pkg.go.dev/github.com/sourcegraph/conc) -[![Sourcegraph](https://img.shields.io/badge/view%20on-sourcegraph-A112FE?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAEZklEQVRoQ+2aXWgUZxSG3292sxtNN43BhBakFPyhxSujRSxiU1pr7SaGXqgUxOIEW0IFkeYighYUxAuLUlq0lrq2iCDpjWtmFVtoG6QVNOCFVShVLyxIk0DVjZLMxt3xTGTccd2ZOd/8JBHci0CY9zvnPPN+/7sCIXwKavOwAcy2QgngQiIztDSE0OwQlDPYR1ebiaH6J5kZChyfW12gRG4QVgGTBfMchMbFP9Sn5nlZL2D0JjLD6710lc+z0NfqSGTXQRQ4bX07Mq423yoBL3OSyHSvUxirMuaEvgbJWrdcvkHMoJwxYuq4INUhyuWvQa1jvdMGxAvCxJlyEC9XOBCWL04wwRzpbDoDQ7wfZJzIQLi5Eggk6DiRhZgWIAbE3NrM4A3LPT8Q7UgqAqLqTmLSHLGPkyzG/qXEczhd0q6RH+zaSBfaUoc4iQx19pIClIscrTkNZzG6gd7qMY6eC2Hqyo705ZfTf+eqJmhMzcSbYtQpOXc92ZsZjLVAL4YNUQbJ5Ttg4CQrQdGYj44Xr9m1XJCzmZusFDJOWNpHjmh5x624a2ZFtOKDVL+uNo2TuXE3bZQQZUf8gtgqP31uI94Z/rMqix+IGiRfWw3xN9dCgVx+L3WrHm4Dju6PXz/EkjuXJ6R+IGgyOE1TbZqTq9y1eo0EZo7oMo1ktPu3xjHvuiLT5AFNszUyDULtWpzE2/fEsey8O5TbWuGWwxrs5rS7nFNMWJrNh2No74s9Ec4vRNmRRzPXMP19fBMSVsGcOJ98G8N3Wl2gXcbTjbX7vUBxLaeASDQCm5Cu/0E2tvtb0Ea+BowtskFD0wvlc6Rf2M+Jx7dTu7ubFr2dnKDRaMQe2v/tcIrNB7FH0O50AcrBaApmRDVwFO31ql3pD8QW4dP0feNwl/Q+kFEtRyIGyaWXnpy1OO0qNJWHo1y6iCmAGkBb/Ru+HenDWIF2mo4r8G+tRRzoniSn2uqFLxANhe9LKHVyTbz6egk9+x5w5fK6ulSNNMhZ/Feno+GebLZV6isTTa6k5qNl5RnZ5u56Ib6SBvFzaWBBVFZzvnERWlt/Cg4l27XChLCqFyLekjhy6xJyoytgjPf7opIB8QPx7sYFiMXHPGt76m741MhCKMZfng0nBOIjmoJPsLqWHwgFpe6V6qtfcopxveR2Oy+J0ntIN/zCWkf8QNAJ7y6d8Bq4lxLc2/qJl5K7t432XwcqX5CrI34gzATWuYILQtdQPyePDK3iuOekCR3Efjhig1B1Uq5UoXEEoZX7d1q535J5S9VOeFyYyEBku5XTMXXKQTToX5Rg7OI44nbW5oKYeYK4EniMeF0YFNSmb+grhc84LyRCEP1/OurOcipCQbKxDeK2V5FcVyIDMQvsgz5gwFhcWWwKyRlvQ3gv29RwWoDYAbIofNyBxI9eDlQ+n3YgsgCWnr4MStGXQXmv9pF2La/k3OccV54JEBM4yp9EsXa/3LfO0dGPcYq0Y7DfZB8nJzZw2rppHgKgVHs8L5wvRwAAAABJRU5ErkJggg==)](https://sourcegraph.com/github.com/sourcegraph/conc) -[![Go Report Card](https://goreportcard.com/badge/github.com/sourcegraph/conc)](https://goreportcard.com/report/github.com/sourcegraph/conc) -[![codecov](https://codecov.io/gh/sourcegraph/conc/branch/main/graph/badge.svg?token=MQZTEA1QWT)](https://codecov.io/gh/sourcegraph/conc) -[![Discord](https://img.shields.io/badge/discord-chat-%235765F2)](https://discord.gg/bvXQXmtRjN) - -`conc` is your toolbelt for structured concurrency in go, making common tasks -easier and safer. 
- -```sh -go get github.com/sourcegraph/conc -``` - -# At a glance - -- Use [`conc.WaitGroup`](https://pkg.go.dev/github.com/sourcegraph/conc#WaitGroup) if you just want a safer version of `sync.WaitGroup` -- Use [`pool.Pool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool) if you want a concurrency-limited task runner -- Use [`pool.ResultPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultPool) if you want a concurrent task runner that collects task results -- Use [`pool.(Result)?ErrorPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool) if your tasks are fallible -- Use [`pool.(Result)?ContextPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ContextPool) if your tasks should be canceled on failure -- Use [`stream.Stream`](https://pkg.go.dev/github.com/sourcegraph/conc/stream#Stream) if you want to process an ordered stream of tasks in parallel with serial callbacks -- Use [`iter.Map`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#Map) if you want to concurrently map a slice -- Use [`iter.ForEach`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#ForEach) if you want to concurrently iterate over a slice -- Use [`panics.Catcher`](https://pkg.go.dev/github.com/sourcegraph/conc/panics#Catcher) if you want to catch panics in your own goroutines - -All pools are created with -[`pool.New()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#New) -or -[`pool.NewWithResults[T]()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#NewWithResults), -then configured with methods: - -- [`p.WithMaxGoroutines()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.MaxGoroutines) configures the maximum number of goroutines in the pool -- [`p.WithErrors()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithErrors) configures the pool to run tasks that return errors -- [`p.WithContext(ctx)`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithContext) configures the pool to run tasks that should be canceled on first error -- [`p.WithFirstError()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool.WithFirstError) configures error pools to only keep the first returned error rather than an aggregated error -- [`p.WithCollectErrored()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultContextPool.WithCollectErrored) configures result pools to collect results even when the task errored - -# Goals - -The main goals of the package are: -1) Make it harder to leak goroutines -2) Handle panics gracefully -3) Make concurrent code easier to read - -## Goal #1: Make it harder to leak goroutines - -A common pain point when working with goroutines is cleaning them up. It's -really easy to fire off a `go` statement and fail to properly wait for it to -complete. - -`conc` takes the opinionated stance that all concurrency should be scoped. -That is, goroutines should have an owner and that owner should always -ensure that its owned goroutines exit properly. - -In `conc`, the owner of a goroutine is always a `conc.WaitGroup`. Goroutines -are spawned in a `WaitGroup` with `(*WaitGroup).Go()`, and -`(*WaitGroup).Wait()` should always be called before the `WaitGroup` goes out -of scope. - -In some cases, you might want a spawned goroutine to outlast the scope of the -caller. In that case, you could pass a `WaitGroup` into the spawning function. 
- -```go -func main() { - var wg conc.WaitGroup - defer wg.Wait() - - startTheThing(&wg) -} - -func startTheThing(wg *conc.WaitGroup) { - wg.Go(func() { ... }) -} -``` - -For some more discussion on why scoped concurrency is nice, check out [this -blog -post](https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/). - -## Goal #2: Handle panics gracefully - -A frequent problem with goroutines in long-running applications is handling -panics. A goroutine spawned without a panic handler will crash the whole process -on panic. This is usually undesirable. - -However, if you do add a panic handler to a goroutine, what do you do with the -panic once you catch it? Some options: -1) Ignore it -2) Log it -3) Turn it into an error and return that to the goroutine spawner -4) Propagate the panic to the goroutine spawner - -Ignoring panics is a bad idea since panics usually mean there is actually -something wrong and someone should fix it. - -Just logging panics isn't great either because then there is no indication to the spawner -that something bad happened, and it might just continue on as normal even though your -program is in a really bad state. - -Both (3) and (4) are reasonable options, but both require the goroutine to have -an owner that can actually receive the message that something went wrong. This -is generally not true with a goroutine spawned with `go`, but in the `conc` -package, all goroutines have an owner that must collect the spawned goroutine. -In the conc package, any call to `Wait()` will panic if any of the spawned goroutines -panicked. Additionally, it decorates the panic value with a stacktrace from the child -goroutine so that you don't lose information about what caused the panic. - -Doing this all correctly every time you spawn something with `go` is not -trivial and it requires a lot of boilerplate that makes the important parts of -the code more difficult to read, so `conc` does this for you. - - - - - - - - - - -
    stdlibconc
    - -```go -type caughtPanicError struct { - val any - stack []byte -} - -func (e *caughtPanicError) Error() string { - return fmt.Sprintf( - "panic: %q\n%s", - e.val, - string(e.stack) - ) -} - -func main() { - done := make(chan error) - go func() { - defer func() { - if v := recover(); v != nil { - done <- &caughtPanicError{ - val: v, - stack: debug.Stack() - } - } else { - done <- nil - } - }() - doSomethingThatMightPanic() - }() - err := <-done - if err != nil { - panic(err) - } -} -``` - - -```go -func main() { - var wg conc.WaitGroup - wg.Go(doSomethingThatMightPanic) - // panics with a nice stacktrace - wg.Wait() -} -``` -
    - -## Goal #3: Make concurrent code easier to read - -Doing concurrency correctly is difficult. Doing it in a way that doesn't -obfuscate what the code is actually doing is more difficult. The `conc` package -attempts to make common operations easier by abstracting as much boilerplate -complexity as possible. - -Want to run a set of concurrent tasks with a bounded set of goroutines? Use -`pool.New()`. Want to process an ordered stream of results concurrently, but -still maintain order? Try `stream.New()`. What about a concurrent map over -a slice? Take a peek at `iter.Map()`. - -Browse some examples below for some comparisons with doing these by hand. - -# Examples - -Each of these examples forgoes propagating panics for simplicity. To see -what kind of complexity that would add, check out the "Goal #2" header above. - -Spawn a set of goroutines and waiting for them to finish: - - - - - - - - - - -
    stdlibconc
    - -```go -func main() { - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // crashes on panic! - doSomething() - }() - } - wg.Wait() -} -``` - - -```go -func main() { - var wg conc.WaitGroup - for i := 0; i < 10; i++ { - wg.Go(doSomething) - } - wg.Wait() -} -``` -
    - -Process each element of a stream in a static pool of goroutines: - - - - - - - - - - -
    stdlibconc
    - -```go -func process(stream chan int) { - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for elem := range stream { - handle(elem) - } - }() - } - wg.Wait() -} -``` - - -```go -func process(stream chan int) { - p := pool.New().WithMaxGoroutines(10) - for elem := range stream { - elem := elem - p.Go(func() { - handle(elem) - }) - } - p.Wait() -} -``` -
    - -Process each element of a slice in a static pool of goroutines: - - - - - - - - - - -
    stdlibconc
    - -```go -func process(values []int) { - feeder := make(chan int, 8) - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for elem := range feeder { - handle(elem) - } - }() - } - - for _, value := range values { - feeder <- value - } - close(feeder) - wg.Wait() -} -``` - - -```go -func process(values []int) { - iter.ForEach(values, handle) -} -``` -
    - -Concurrently map a slice: - - - - - - - - - - -
    stdlibconc
    - -```go -func concMap( - input []int, - f func(int) int, -) []int { - res := make([]int, len(input)) - var idx atomic.Int64 - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - for { - i := int(idx.Add(1) - 1) - if i >= len(input) { - return - } - - res[i] = f(input[i]) - } - }() - } - wg.Wait() - return res -} -``` - - -```go -func concMap( - input []int, - f func(*int) int, -) []int { - return iter.Map(input, f) -} -``` -
    - -Process an ordered stream concurrently: - - - - - - - - - - - -
    stdlibconc
    - -```go -func mapStream( - in chan int, - out chan int, - f func(int) int, -) { - tasks := make(chan func()) - taskResults := make(chan chan int) - - // Worker goroutines - var workerWg sync.WaitGroup - for i := 0; i < 10; i++ { - workerWg.Add(1) - go func() { - defer workerWg.Done() - for task := range tasks { - task() - } - }() - } - - // Ordered reader goroutines - var readerWg sync.WaitGroup - readerWg.Add(1) - go func() { - defer readerWg.Done() - for result := range taskResults { - item := <-result - out <- item - } - }() - - // Feed the workers with tasks - for elem := range in { - resultCh := make(chan int, 1) - taskResults <- resultCh - tasks <- func() { - resultCh <- f(elem) - } - } - - // We've exhausted input. - // Wait for everything to finish - close(tasks) - workerWg.Wait() - close(taskResults) - readerWg.Wait() -} -``` - - -```go -func mapStream( - in chan int, - out chan int, - f func(int) int, -) { - s := stream.New().WithMaxGoroutines(10) - for elem := range in { - elem := elem - s.Go(func() stream.Callback { - res := f(elem) - return func() { out <- res } - }) - } - s.Wait() -} -``` -
    - -# Status - -This package is currently pre-1.0. There are likely to be minor breaking -changes before a 1.0 release as we stabilize the APIs and tweak defaults. -Please open an issue if you have questions, concerns, or requests that you'd -like addressed before the 1.0 release. Currently, a 1.0 is targeted for -March 2023. diff --git a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go b/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go deleted file mode 100644 index 7087e32a8f..0000000000 --- a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !go1.20 -// +build !go1.20 - -package multierror - -import "go.uber.org/multierr" - -var ( - Join = multierr.Combine -) diff --git a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go b/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go deleted file mode 100644 index 39cff829ac..0000000000 --- a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build go1.20 -// +build go1.20 - -package multierror - -import "errors" - -var ( - Join = errors.Join -) diff --git a/vendor/github.com/sourcegraph/conc/iter/iter.go b/vendor/github.com/sourcegraph/conc/iter/iter.go deleted file mode 100644 index 124b4f9400..0000000000 --- a/vendor/github.com/sourcegraph/conc/iter/iter.go +++ /dev/null @@ -1,85 +0,0 @@ -package iter - -import ( - "runtime" - "sync/atomic" - - "github.com/sourcegraph/conc" -) - -// defaultMaxGoroutines returns the default maximum number of -// goroutines to use within this package. -func defaultMaxGoroutines() int { return runtime.GOMAXPROCS(0) } - -// Iterator can be used to configure the behaviour of ForEach -// and ForEachIdx. The zero value is safe to use with reasonable -// defaults. -// -// Iterator is also safe for reuse and concurrent use. -type Iterator[T any] struct { - // MaxGoroutines controls the maximum number of goroutines - // to use on this Iterator's methods. - // - // If unset, MaxGoroutines defaults to runtime.GOMAXPROCS(0). - MaxGoroutines int -} - -// ForEach executes f in parallel over each element in input. -// -// It is safe to mutate the input parameter, which makes it -// possible to map in place. -// -// ForEach always uses at most runtime.GOMAXPROCS goroutines. -// It takes roughly 2µs to start up the goroutines and adds -// an overhead of roughly 50ns per element of input. For -// a configurable goroutine limit, use a custom Iterator. -func ForEach[T any](input []T, f func(*T)) { Iterator[T]{}.ForEach(input, f) } - -// ForEach executes f in parallel over each element in input, -// using up to the Iterator's configured maximum number of -// goroutines. -// -// It is safe to mutate the input parameter, which makes it -// possible to map in place. -// -// It takes roughly 2µs to start up the goroutines and adds -// an overhead of roughly 50ns per element of input. -func (iter Iterator[T]) ForEach(input []T, f func(*T)) { - iter.ForEachIdx(input, func(_ int, t *T) { - f(t) - }) -} - -// ForEachIdx is the same as ForEach except it also provides the -// index of the element to the callback. -func ForEachIdx[T any](input []T, f func(int, *T)) { Iterator[T]{}.ForEachIdx(input, f) } - -// ForEachIdx is the same as ForEach except it also provides the -// index of the element to the callback. 
-func (iter Iterator[T]) ForEachIdx(input []T, f func(int, *T)) {
-	if iter.MaxGoroutines == 0 {
-		// iter is a value receiver and is hence safe to mutate
-		iter.MaxGoroutines = defaultMaxGoroutines()
-	}
-
-	numInput := len(input)
-	if iter.MaxGoroutines > numInput {
-		// No more concurrent tasks than the number of input items.
-		iter.MaxGoroutines = numInput
-	}
-
-	var idx atomic.Int64
-	// Create the task outside the loop to avoid extra closure allocations.
-	task := func() {
-		i := int(idx.Add(1) - 1)
-		for ; i < numInput; i = int(idx.Add(1) - 1) {
-			f(i, &input[i])
-		}
-	}
-
-	var wg conc.WaitGroup
-	for i := 0; i < iter.MaxGoroutines; i++ {
-		wg.Go(task)
-	}
-	wg.Wait()
-}
diff --git a/vendor/github.com/sourcegraph/conc/iter/map.go b/vendor/github.com/sourcegraph/conc/iter/map.go
deleted file mode 100644
index efbe6bfaf1..0000000000
--- a/vendor/github.com/sourcegraph/conc/iter/map.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package iter
-
-import (
-	"sync"
-
-	"github.com/sourcegraph/conc/internal/multierror"
-)
-
-// Mapper is an Iterator with a result type R. It can be used to configure
-// the behaviour of Map and MapErr. The zero value is safe to use with
-// reasonable defaults.
-//
-// Mapper is also safe for reuse and concurrent use.
-type Mapper[T, R any] Iterator[T]
-
-// Map applies f to each element of input, returning the mapped result.
-//
-// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable
-// goroutine limit, use a custom Mapper.
-func Map[T, R any](input []T, f func(*T) R) []R {
-	return Mapper[T, R]{}.Map(input, f)
-}
-
-// Map applies f to each element of input, returning the mapped result.
-//
-// Map uses up to the configured Mapper's maximum number of goroutines.
-func (m Mapper[T, R]) Map(input []T, f func(*T) R) []R {
-	res := make([]R, len(input))
-	Iterator[T](m).ForEachIdx(input, func(i int, t *T) {
-		res[i] = f(t)
-	})
-	return res
-}
-
-// MapErr applies f to each element of the input, returning the mapped result
-// and a combined error of all returned errors.
-//
-// MapErr always uses at most runtime.GOMAXPROCS goroutines. For a configurable
-// goroutine limit, use a custom Mapper.
-func MapErr[T, R any](input []T, f func(*T) (R, error)) ([]R, error) {
-	return Mapper[T, R]{}.MapErr(input, f)
-}
-
-// MapErr applies f to each element of the input, returning the mapped result
-// and a combined error of all returned errors.
-//
-// MapErr uses up to the configured Mapper's maximum number of goroutines.
-func (m Mapper[T, R]) MapErr(input []T, f func(*T) (R, error)) ([]R, error) {
-	var (
-		res    = make([]R, len(input))
-		errMux sync.Mutex
-		errs   error
-	)
-	Iterator[T](m).ForEachIdx(input, func(i int, t *T) {
-		var err error
-		res[i], err = f(t)
-		if err != nil {
-			errMux.Lock()
-			// TODO: use stdlib errors once multierrors land in go 1.20
-			errs = multierror.Join(errs, err)
-			errMux.Unlock()
-		}
-	})
-	return res, errs
-}
diff --git a/vendor/github.com/sourcegraph/conc/panics/panics.go b/vendor/github.com/sourcegraph/conc/panics/panics.go
deleted file mode 100644
index abbed7fa05..0000000000
--- a/vendor/github.com/sourcegraph/conc/panics/panics.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package panics
-
-import (
-	"fmt"
-	"runtime"
-	"runtime/debug"
-	"sync/atomic"
-)
-
-// Catcher is used to catch panics. You can execute a function with Try,
-// which will catch any spawned panic. Try can be called any number of times,
-// from any number of goroutines.
Once all calls to Try have completed, you can -// get the value of the first panic (if any) with Recovered(), or you can just -// propagate the panic (re-panic) with Repanic(). -type Catcher struct { - recovered atomic.Pointer[Recovered] -} - -// Try executes f, catching any panic it might spawn. It is safe -// to call from multiple goroutines simultaneously. -func (p *Catcher) Try(f func()) { - defer p.tryRecover() - f() -} - -func (p *Catcher) tryRecover() { - if val := recover(); val != nil { - rp := NewRecovered(1, val) - p.recovered.CompareAndSwap(nil, &rp) - } -} - -// Repanic panics if any calls to Try caught a panic. It will panic with the -// value of the first panic caught, wrapped in a panics.Recovered with caller -// information. -func (p *Catcher) Repanic() { - if val := p.Recovered(); val != nil { - panic(val) - } -} - -// Recovered returns the value of the first panic caught by Try, or nil if -// no calls to Try panicked. -func (p *Catcher) Recovered() *Recovered { - return p.recovered.Load() -} - -// NewRecovered creates a panics.Recovered from a panic value and a collected -// stacktrace. The skip parameter allows the caller to skip stack frames when -// collecting the stacktrace. Calling with a skip of 0 means include the call to -// NewRecovered in the stacktrace. -func NewRecovered(skip int, value any) Recovered { - // 64 frames should be plenty - var callers [64]uintptr - n := runtime.Callers(skip+1, callers[:]) - return Recovered{ - Value: value, - Callers: callers[:n], - Stack: debug.Stack(), - } -} - -// Recovered is a panic that was caught with recover(). -type Recovered struct { - // The original value of the panic. - Value any - // The caller list as returned by runtime.Callers when the panic was - // recovered. Can be used to produce a more detailed stack information with - // runtime.CallersFrames. - Callers []uintptr - // The formatted stacktrace from the goroutine where the panic was recovered. - // Easier to use than Callers. - Stack []byte -} - -// String renders a human-readable formatting of the panic. -func (p *Recovered) String() string { - return fmt.Sprintf("panic: %v\nstacktrace:\n%s\n", p.Value, p.Stack) -} - -// AsError casts the panic into an error implementation. The implementation -// is unwrappable with the cause of the panic, if the panic was provided one. -func (p *Recovered) AsError() error { - if p == nil { - return nil - } - return &ErrRecovered{*p} -} - -// ErrRecovered wraps a panics.Recovered in an error implementation. -type ErrRecovered struct{ Recovered } - -var _ error = (*ErrRecovered)(nil) - -func (p *ErrRecovered) Error() string { return p.String() } - -func (p *ErrRecovered) Unwrap() error { - if err, ok := p.Value.(error); ok { - return err - } - return nil -} diff --git a/vendor/github.com/sourcegraph/conc/panics/try.go b/vendor/github.com/sourcegraph/conc/panics/try.go deleted file mode 100644 index 4ded92a1cb..0000000000 --- a/vendor/github.com/sourcegraph/conc/panics/try.go +++ /dev/null @@ -1,11 +0,0 @@ -package panics - -// Try executes f, catching and returning any panic it might spawn. -// -// The recovered panic can be propagated with panic(), or handled as a normal error with -// (*panics.Recovered).AsError(). 
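A usage sketch for Try, which wraps a one-off Catcher: the recovered value can be re-panicked or converted into an ordinary, unwrappable error. The import path assumes the canonical `github.com/sourcegraph/conc` module:

```go
package main

import (
	"fmt"

	"github.com/sourcegraph/conc/panics"
)

func main() {
	r := panics.Try(func() {
		panic("boom")
	})
	if r != nil {
		fmt.Println(r.String()) // "panic: boom" plus the captured stacktrace
		err := r.AsError()      // or handle it as a normal error value
		fmt.Println(err)
	}
}
```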
-func Try(f func()) *Recovered { - var c Catcher - c.Try(f) - return c.Recovered() -} diff --git a/vendor/github.com/sourcegraph/conc/waitgroup.go b/vendor/github.com/sourcegraph/conc/waitgroup.go deleted file mode 100644 index 47b1bc1a5c..0000000000 --- a/vendor/github.com/sourcegraph/conc/waitgroup.go +++ /dev/null @@ -1,52 +0,0 @@ -package conc - -import ( - "sync" - - "github.com/sourcegraph/conc/panics" -) - -// NewWaitGroup creates a new WaitGroup. -func NewWaitGroup() *WaitGroup { - return &WaitGroup{} -} - -// WaitGroup is the primary building block for scoped concurrency. -// Goroutines can be spawned in the WaitGroup with the Go method, -// and calling Wait() will ensure that each of those goroutines exits -// before continuing. Any panics in a child goroutine will be caught -// and propagated to the caller of Wait(). -// -// The zero value of WaitGroup is usable, just like sync.WaitGroup. -// Also like sync.WaitGroup, it must not be copied after first use. -type WaitGroup struct { - wg sync.WaitGroup - pc panics.Catcher -} - -// Go spawns a new goroutine in the WaitGroup. -func (h *WaitGroup) Go(f func()) { - h.wg.Add(1) - go func() { - defer h.wg.Done() - h.pc.Try(f) - }() -} - -// Wait will block until all goroutines spawned with Go exit and will -// propagate any panics spawned in a child goroutine. -func (h *WaitGroup) Wait() { - h.wg.Wait() - - // Propagate a panic if we caught one from a child goroutine. - h.pc.Repanic() -} - -// WaitAndRecover will block until all goroutines spawned with Go exit and -// will return a *panics.Recovered if one of the child goroutines panics. -func (h *WaitGroup) WaitAndRecover() *panics.Recovered { - h.wg.Wait() - - // Return a recovered panic if we caught one from a child goroutine. - return h.pc.Recovered() -} diff --git a/vendor/github.com/spf13/afero/.gitignore b/vendor/github.com/spf13/afero/.gitignore deleted file mode 100644 index 9c1d986118..0000000000 --- a/vendor/github.com/spf13/afero/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -sftpfs/file1 -sftpfs/test/ diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt deleted file mode 100644 index 298f0e2665..0000000000 --- a/vendor/github.com/spf13/afero/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md deleted file mode 100644 index 3bafbfdfca..0000000000 --- a/vendor/github.com/spf13/afero/README.md +++ /dev/null @@ -1,442 +0,0 @@ -![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) - -A FileSystem Abstraction System for Go - -[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -# Overview - -Afero is a filesystem framework providing a simple, uniform and universal API -for interacting with any filesystem, as an abstraction layer providing interfaces, -types and methods. Afero has an exceptionally clean interface and simple design -without needless constructors or initialization methods. - -Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with afero while retaining all the power -and benefit of the os and ioutil packages. - -Afero provides significant improvements over using the os package alone, most -notably the ability to create mock and testing filesystems without relying on the disk. - -It is suitable for use in any situation where you would consider using the OS -package, as it provides an additional abstraction that makes it easy to use a -memory backed file system during testing. It also adds support for the http -filesystem for full interoperability.
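 - -A minimal sketch of the abstraction in use (illustrative only; the `countBytes` helper is not part of the README): code written against `afero.Fs` runs unchanged against the real disk or an in-memory filesystem. - -```go -// countBytes works with any afero.Fs backend. -func countBytes(fs afero.Fs, name string) (int, error) { -	data, err := afero.ReadFile(fs, name) -	if err != nil { -		return 0, err -	} -	return len(data), nil -} - -// countBytes(afero.NewOsFs(), "hello.txt") and -// countBytes(afero.NewMemMapFs(), "hello.txt") behave identically. -```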
- - -## Afero Features - -* A single consistent API for accessing a variety of filesystems -* Interoperation between a variety of file system types -* A set of interfaces to encourage and enforce interoperability between backends -* An atomic cross platform memory backed file system -* Support for compositional (union) file systems by combining multiple file systems acting as one -* Specialized backends which modify existing filesystems (Read Only, Regexp filtered) -* A set of utility functions ported from io, ioutil & hugo to be afero aware -* A wrapper for the Go 1.16 filesystem abstraction `io/fs.FS` - -# Using Afero - -Afero is easy to use and easier to adopt. - -A few different ways you could use Afero: - -* Use the interfaces alone to define your own file system. -* Use it as a wrapper for the OS packages. -* Define different filesystems for different parts of your application. -* Use Afero for mock filesystems while testing. - -## Step 1: Install Afero - -First use go get to install the latest version of the library. - - $ go get github.com/spf13/afero - -Next include Afero in your application. -```go -import "github.com/spf13/afero" -``` - -## Step 2: Declare a backend - -First define a package variable and set it to a filesystem. -```go -var AppFs = afero.NewMemMapFs() -``` -or -```go -var AppFs = afero.NewOsFs() -``` -It is important to note that if you repeat the composite literal you -will be using a completely new and isolated filesystem. In the case of -OsFs it will still use the same underlying filesystem but will reduce -the ability to drop in other filesystems as desired. - -## Step 3: Use it like you would the OS package - -Throughout your application use any function and method like you normally -would. - -So if my application previously had: -```go -os.Open("/tmp/foo") -``` -We would replace it with: -```go -AppFs.Open("/tmp/foo") -``` - -`AppFs` being the variable we defined above. - - -## List of all available functions - -File System Methods Available: -```go -Chmod(name string, mode os.FileMode) : error -Chown(name string, uid, gid int) : error -Chtimes(name string, atime time.Time, mtime time.Time) : error -Create(name string) : File, error -Mkdir(name string, perm os.FileMode) : error -MkdirAll(path string, perm os.FileMode) : error -Name() : string -Open(name string) : File, error -OpenFile(name string, flag int, perm os.FileMode) : File, error -Remove(name string) : error -RemoveAll(path string) : error -Rename(oldname, newname string) : error -Stat(name string) : os.FileInfo, error -``` -File Interfaces and Methods Available: -```go -io.Closer -io.Reader -io.ReaderAt -io.Seeker -io.Writer -io.WriterAt - -Name() : string -Readdir(count int) : []os.FileInfo, error -Readdirnames(n int) : []string, error -Stat() : os.FileInfo, error -Sync() : error -Truncate(size int64) : error -WriteString(s string) : ret int, err error -``` -In some applications it may make sense to define a new package that -simply exports the file system variable for easy access from anywhere. - -## Using Afero's utility functions - -Afero provides a set of functions to make it easier to use the underlying file systems. -These functions have been primarily ported from io & ioutil with some developed for Hugo. - -The afero utilities support all afero compatible backends.
- -The list of utilities includes: - -```go -DirExists(path string) (bool, error) -Exists(path string) (bool, error) -FileContainsBytes(filename string, subslice []byte) (bool, error) -GetTempDir(subPath string) string -IsDir(path string) (bool, error) -IsEmpty(path string) (bool, error) -ReadDir(dirname string) ([]os.FileInfo, error) -ReadFile(filename string) ([]byte, error) -SafeWriteReader(path string, r io.Reader) (err error) -TempDir(dir, prefix string) (name string, err error) -TempFile(dir, prefix string) (f File, err error) -Walk(root string, walkFn filepath.WalkFunc) error -WriteFile(filename string, data []byte, perm os.FileMode) error -WriteReader(path string, r io.Reader) (err error) -``` -For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero). - -They can be used in two different ways: you can either call them directly, -where the first parameter of each function is the file -system, or you can declare a new `Afero`, a custom type used to bind these -functions as methods to a given filesystem. - -### Calling utilities directly - -```go -fs := new(afero.MemMapFs) -f, err := afero.TempFile(fs, "", "ioutil-test") -``` - -### Calling via Afero - -```go -fs := afero.NewMemMapFs() -afs := &afero.Afero{Fs: fs} -f, err := afs.TempFile("", "ioutil-test") -``` - -## Using Afero for Testing - -There is a large benefit to using a mock filesystem for testing. It has a -completely blank state every time it is initialized and is easily -reproducible regardless of OS. You could create files to your heart's content -and the file access would be fast while also saving you from all the annoying -issues with deleting temporary files, Windows file locking, etc. The MemMapFs -backend is perfect for testing. - -* Much faster than performing I/O operations on disk -* Avoid security issues and permissions -* Far more control. 'rm -rf /' with confidence -* Test setup is far easier to do -* No test cleanup needed - -One way to accomplish this is to define a variable as mentioned above. -In your application this will be set to afero.NewOsFs(); during testing you -can set it to afero.NewMemMapFs(). - -It wouldn't be uncommon to have each test initialize a blank slate memory -backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere -appropriate in my application code. This approach ensures that tests are order -independent, with no test relying on the state left by an earlier test. - -Then in my tests I would initialize a new MemMapFs for each test: -```go -func TestExist(t *testing.T) { - appFS := afero.NewMemMapFs() - // create test files and directories - appFS.MkdirAll("src/a", 0755) - afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) - afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) - name := "src/c" - _, err := appFS.Stat(name) - if os.IsNotExist(err) { - t.Errorf("file \"%s\" does not exist.\n", name) - } -} -``` - -# Available Backends - -## Operating System Native - -### OsFs - -The first is simply a wrapper around the native OS calls. This makes it -very easy to use as all of the calls are the same as the existing OS -calls. It also makes it trivial to have your code use the OS during -operation and a mock filesystem during testing or as needed.
- -```go -appfs := afero.NewOsFs() -appfs.MkdirAll("src/a", 0755) -``` - -## Memory Backed Storage - -### MemMapFs - -Afero also provides a fully atomic memory backed filesystem perfect for use in -mocking and for avoiding unnecessary disk I/O when persistence isn't -necessary. It is fully concurrent and will work safely within goroutines. - -```go -mm := afero.NewMemMapFs() -mm.MkdirAll("src/a", 0755) -``` - -#### InMemoryFile - -As part of MemMapFs, Afero also provides an atomic, fully concurrent memory -backed file implementation. This can be used in other memory backed file -systems with ease. Plans are to add a radix tree memory stored file -system using InMemoryFile. - -## Network Interfaces - -### SftpFs - -Afero has experimental support for the secure file transfer protocol (SFTP), which can -be used to perform file operations over an encrypted channel. - -### GCSFs - -Afero has experimental support for Google Cloud Storage (GCS). You can either set the -`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in -`NewGcsFS` to configure access to your GCS bucket. - -Some known limitations of the existing implementation: -* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version. -* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that is left for another version. -* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent. - - -## Filtering Backends - -### BasePathFs - -The BasePathFs restricts all operations to a given path within an Fs. -File names passed to operations on this Fs are prepended with -the base path before the source Fs is called. - -```go -bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path") -``` - -### ReadOnlyFs - -A thin wrapper around the source Fs providing a read only view. - -```go -fs := afero.NewReadOnlyFs(afero.NewOsFs()) -_, err := fs.Create("/file.txt") -// err = syscall.EPERM -``` - -### RegexpFs - -A filtered view on file names: any file NOT matching -the passed regexp will be treated as non-existing. -Files not matching the regexp provided will not be created. -Directories are not filtered. - -```go -fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`)) -_, err := fs.Create("/file.html") -// err = syscall.ENOENT -``` - -### HttpFs - -Afero provides an http compatible backend which can wrap any of the existing -backends. - -The net/http package requires a version of Open that -returns an http.File type. - -Afero provides an HttpFs file system which satisfies this requirement. -Any Afero FileSystem can be used as an HttpFs. - -```go -httpFs := afero.NewHttpFs(afero.NewOsFs()) // NewHttpFs wraps an existing source Fs -fileserver := http.FileServer(httpFs.Dir("/static")) // Dir takes a base path within that Fs -http.Handle("/", fileserver) -``` - -## Composite Backends - -Afero provides the ability to have two filesystems (or more) act as a single -file system. - -### CacheOnReadFs - -The CacheOnReadFs will lazily make copies of any accessed files from the base -layer into the overlay. Subsequent reads will be pulled from the overlay -directly, provided the request is within the cache duration of when it was -created in the overlay. - -If the base filesystem is writeable, any changes to files will be -done first to the base, then to the overlay layer.
Write calls on open file -handles like `Write()` or `Truncate()` go to the overlay first. - -To write files to the overlay only, you can use the overlay Fs directly (not -via the union Fs). - -Files are cached in the layer for the given time.Duration; a cache duration of 0 -means "forever", meaning the file will never be re-requested from the base. - -A read-only base will make the overlay also read-only but still copy files -from the base to the overlay when they're not present (or outdated) in the -caching layer. - -```go -base := afero.NewOsFs() -layer := afero.NewMemMapFs() -ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second) -``` - -### CopyOnWriteFs() - -The CopyOnWriteFs is a read only base file system with a potentially -writeable layer on top. - -Read operations will first look in the overlay and if not found there, will -serve the file from the base. - -Changes to the file system will only be made in the overlay. - -Any attempt to modify a file found only in the base will copy the file to the -overlay layer before modification (including opening a file with a writable -handle). - -Removing and renaming files present only in the base layer is not currently -permitted. If a file is present in the base layer and the overlay, only the -overlay will be removed/renamed. - -```go - base := afero.NewOsFs() - roBase := afero.NewReadOnlyFs(base) - ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs()) - - fh, _ := ufs.Create("/home/test/file2.txt") - fh.WriteString("This is a test") - fh.Close() -``` - -In this example all write operations will only occur in memory (MemMapFs), -leaving the base filesystem (OsFs) untouched. - - -## Desired/possible backends - -The following is a short list of possible backends we hope someone will -implement: - -* SSH -* S3 - -# About the project - -## What's in the name - -Afero comes from the Latin roots Ad-Facere. - -**"Ad"** is a prefix meaning "to". - -**"Facere"** is a form of the root "faciō", meaning "to make or do". - -The literal meaning of afero is "to make" or "to do", which seems very fitting -for a library that allows one to make files and directories and do things with them. - -The English word that shares the same roots as Afero is "affair". Affair shares -the same concept but as a noun it means "something that is made or done" or "an -object of a particular type". - -It's also nice that unlike some of my other libraries (hugo, cobra, viper) it -Googles very well. - -## Release Notes - -See the [Releases Page](https://github.com/spf13/afero/releases). - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13) -* [jaqx0r](https://github.com/jaqx0r) -* [mbertschler](https://github.com/mbertschler) -* [xor-gate](https://github.com/xor-gate) - -## License - -Afero is released under the Apache 2.0 license. See -[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go deleted file mode 100644 index 39f6585209..0000000000 --- a/vendor/github.com/spf13/afero/afero.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved.
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package afero provides types and methods for interacting with the filesystem, -// as an abstraction layer. - -// Afero also provides a few implementations that are mostly interoperable. One that -// uses the operating system filesystem, one that uses memory to store files -// (cross platform) and an interface that should be implemented if you want to -// provide your own filesystem. - -package afero - -import ( - "errors" - "io" - "os" - "time" -) - -type Afero struct { - Fs -} - -// File represents a file in the filesystem. -type File interface { - io.Closer - io.Reader - io.ReaderAt - io.Seeker - io.Writer - io.WriterAt - - Name() string - Readdir(count int) ([]os.FileInfo, error) - Readdirnames(n int) ([]string, error) - Stat() (os.FileInfo, error) - Sync() error - Truncate(size int64) error - WriteString(s string) (ret int, err error) -} - -// Fs is the filesystem interface. -// -// Any simulated or real filesystem should implement this interface. -type Fs interface { - // Create creates a file in the filesystem, returning the file and an - // error, if any happens. - Create(name string) (File, error) - - // Mkdir creates a directory in the filesystem, return an error if any - // happens. - Mkdir(name string, perm os.FileMode) error - - // MkdirAll creates a directory path and all parents that does not exist - // yet. - MkdirAll(path string, perm os.FileMode) error - - // Open opens a file, returning it or an error, if any happens. - Open(name string) (File, error) - - // OpenFile opens a file using the given flags and the given mode. - OpenFile(name string, flag int, perm os.FileMode) (File, error) - - // Remove removes a file identified by name, returning an error, if any - // happens. - Remove(name string) error - - // RemoveAll removes a directory path and any children it contains. It - // does not fail if the path does not exist (return nil). - RemoveAll(path string) error - - // Rename renames a file. - Rename(oldname, newname string) error - - // Stat returns a FileInfo describing the named file, or an error, if any - // happens. - Stat(name string) (os.FileInfo, error) - - // The name of this FileSystem - Name() string - - // Chmod changes the mode of the named file to mode. - Chmod(name string, mode os.FileMode) error - - // Chown changes the uid and gid of the named file. 
- Chown(name string, uid, gid int) error - - // Chtimes changes the access and modification times of the named file - Chtimes(name string, atime time.Time, mtime time.Time) error -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("out of range") - ErrTooLarge = errors.New("too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml deleted file mode 100644 index 65e20e8ca3..0000000000 --- a/vendor/github.com/spf13/afero/appveyor.yml +++ /dev/null @@ -1,10 +0,0 @@ -# This currently does nothing. We have moved to GitHub action, but this is kept -# until spf13 has disabled this project in AppVeyor. -version: '{build}' -clone_folder: C:\gopath\src\github.com\spf13\afero -environment: - GOPATH: C:\gopath -build_script: -- cmd: >- - go version - diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go deleted file mode 100644 index 2e72793a3e..0000000000 --- a/vendor/github.com/spf13/afero/basepath.go +++ /dev/null @@ -1,222 +0,0 @@ -package afero - -import ( - "io/fs" - "os" - "path/filepath" - "runtime" - "strings" - "time" -) - -var ( - _ Lstater = (*BasePathFs)(nil) - _ fs.ReadDirFile = (*BasePathFile)(nil) -) - -// The BasePathFs restricts all operations to a given path within an Fs. -// The given file name to the operations on this Fs will be prepended with -// the base path before calling the base Fs. -// Any file name (after filepath.Clean()) outside this base path will be -// treated as non existing file. -// -// Note that it does not clean the error messages on return, so you may -// reveal the real path on errors. -type BasePathFs struct { - source Fs - path string -} - -type BasePathFile struct { - File - path string -} - -func (f *BasePathFile) Name() string { - sourcename := f.File.Name() - return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) -} - -func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) { - if rdf, ok := f.File.(fs.ReadDirFile); ok { - return rdf.ReadDir(n) - } - return readDirFile{f.File}.ReadDir(n) -} - -func NewBasePathFs(source Fs, path string) Fs { - return &BasePathFs{source: source, path: path} -} - -// on a file outside the base path it returns the given file name and an error, -// else the given file with the base path prepended -func (b *BasePathFs) RealPath(name string) (path string, err error) { - if err := validateBasePathName(name); err != nil { - return name, err - } - - bpath := filepath.Clean(b.path) - path = filepath.Clean(filepath.Join(bpath, name)) - if !strings.HasPrefix(path, bpath) { - return name, os.ErrNotExist - } - - return path, nil -} - -func validateBasePathName(name string) error { - if runtime.GOOS != "windows" { - // Not much to do here; - // the virtual file paths all look absolute on *nix. - return nil - } - - // On Windows a common mistake would be to provide an absolute OS path - // We could strip out the base part, but that would not be very portable. 
- if filepath.IsAbs(name) { - return os.ErrNotExist - } - - return nil -} - -func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chtimes", Path: name, Err: err} - } - return b.source.Chtimes(name, atime, mtime) -} - -func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chmod", Path: name, Err: err} - } - return b.source.Chmod(name, mode) -} - -func (b *BasePathFs) Chown(name string, uid, gid int) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chown", Path: name, Err: err} - } - return b.source.Chown(name, uid, gid) -} - -func (b *BasePathFs) Name() string { - return "BasePathFs" -} - -func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "stat", Path: name, Err: err} - } - return b.source.Stat(name) -} - -func (b *BasePathFs) Rename(oldname, newname string) (err error) { - if oldname, err = b.RealPath(oldname); err != nil { - return &os.PathError{Op: "rename", Path: oldname, Err: err} - } - if newname, err = b.RealPath(newname); err != nil { - return &os.PathError{Op: "rename", Path: newname, Err: err} - } - return b.source.Rename(oldname, newname) -} - -func (b *BasePathFs) RemoveAll(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove_all", Path: name, Err: err} - } - return b.source.RemoveAll(name) -} - -func (b *BasePathFs) Remove(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - return b.source.Remove(name) -} - -func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "openfile", Path: name, Err: err} - } - sourcef, err := b.source.OpenFile(name, flag, mode) - if err != nil { - return nil, err - } - return &BasePathFile{sourcef, b.path}, nil -} - -func (b *BasePathFs) Open(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "open", Path: name, Err: err} - } - sourcef, err := b.source.Open(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.Mkdir(name, mode) -} - -func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.MkdirAll(name, mode) -} - -func (b *BasePathFs) Create(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "create", Path: name, Err: err} - } - sourcef, err := b.source.Create(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - name, err := b.RealPath(name) - if err != nil { - return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} - } - if lstater, ok := b.source.(Lstater); ok { - return 
lstater.LstatIfPossible(name) - } - fi, err := b.source.Stat(name) - return fi, false, err -} - -func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error { - oldname, err := b.RealPath(oldname) - if err != nil { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} - } - newname, err = b.RealPath(newname) - if err != nil { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} - } - if linker, ok := b.source.(Linker); ok { - return linker.SymlinkIfPossible(oldname, newname) - } - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) { - name, err := b.RealPath(name) - if err != nil { - return "", &os.PathError{Op: "readlink", Path: name, Err: err} - } - if reader, ok := b.source.(LinkReader); ok { - return reader.ReadlinkIfPossible(name) - } - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go deleted file mode 100644 index 017d344fd5..0000000000 --- a/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ /dev/null @@ -1,315 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -// If the cache duration is 0, cache time will be unlimited, i.e. once -// a file is in the layer, the base will never be read again for this file. -// -// For cache times greater than 0, the modification time of a file is -// checked. Note that a lot of file system implementations only allow a -// resolution of a second for timestamps... or as the godoc for os.Chtimes() -// states: "The underlying filesystem may truncate or round the values to a -// less precise time unit." -// -// This caching union will forward all write calls also to the base file -// system first. To prevent writing to the base Fs, wrap it in a read-only -// filter - Note: this will also make the overlay read-only, for writing files -// in the overlay, use the overlay Fs directly, not via the union Fs. 
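-// -// Illustrative sketch (not from the original source): a disk-backed base with -// an in-memory caching layer refreshed after one minute: -// -//	ufs := NewCacheOnReadFs(NewOsFs(), NewMemMapFs(), time.Minute) -//	f, err := ufs.Open("config.yaml") // the first read copies the file into the layer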
-type CacheOnReadFs struct { - base Fs - layer Fs - cacheTime time.Duration -} - -func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { - return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} -} - -type cacheState int - -const ( - // not present in the overlay, unknown if it exists in the base: - cacheMiss cacheState = iota - // present in the overlay and in base, base file is newer: - cacheStale - // present in the overlay - with cache time == 0 it may exist in the base, - // with cacheTime > 0 it exists in the base and is same age or newer in the - // overlay - cacheHit - // happens if someone writes directly to the overlay without - // going through this union - cacheLocal -) - -func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { - var lfi, bfi os.FileInfo - lfi, err = u.layer.Stat(name) - if err == nil { - if u.cacheTime == 0 { - return cacheHit, lfi, nil - } - if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { - bfi, err = u.base.Stat(name) - if err != nil { - return cacheLocal, lfi, nil - } - if bfi.ModTime().After(lfi.ModTime()) { - return cacheStale, bfi, nil - } - } - return cacheHit, lfi, nil - } - - if err == syscall.ENOENT || os.IsNotExist(err) { - return cacheMiss, nil, nil - } - - return cacheMiss, nil, err -} - -func (u *CacheOnReadFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CacheOnReadFs) copyFileToLayer(name string, flag int, perm os.FileMode) error { - return copyFileToLayer(u.base, u.layer, name, flag, perm) -} - -func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chtimes(name, atime, mtime) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chtimes(name, atime, mtime) - } - if err != nil { - return err - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chmod(name, mode) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chmod(name, mode) - } - if err != nil { - return err - } - return u.layer.Chmod(name, mode) -} - -func (u *CacheOnReadFs) Chown(name string, uid, gid int) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chown(name, uid, gid) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chown(name, uid, gid) - } - if err != nil { - return err - } - return u.layer.Chown(name, uid, gid) -} - -func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheMiss: - return u.base.Stat(name) - default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo - return fi, nil - } -} - -func (u *CacheOnReadFs) Rename(oldname, newname string) error { - st, _, err := u.cacheStatus(oldname) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Rename(oldname, newname) - case cacheStale, cacheMiss: - if err := u.copyToLayer(oldname); err != nil { - return err - 
} - err = u.base.Rename(oldname, newname) - } - if err != nil { - return err - } - return u.layer.Rename(oldname, newname) -} - -func (u *CacheOnReadFs) Remove(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.Remove(name) - } - if err != nil { - return err - } - return u.layer.Remove(name) -} - -func (u *CacheOnReadFs) RemoveAll(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.RemoveAll(name) - } - if err != nil { - return err - } - return u.layer.RemoveAll(name) -} - -func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - st, _, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheLocal, cacheHit: - default: - if err := u.copyFileToLayer(name, flag, perm); err != nil { - return nil, err - } - } - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - bfi, err := u.base.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - lfi, err := u.layer.OpenFile(name, flag, perm) - if err != nil { - bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? - return nil, err - } - return &UnionFile{Base: bfi, Layer: lfi}, nil - } - return u.layer.OpenFile(name, flag, perm) -} - -func (u *CacheOnReadFs) Open(name string) (File, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - - switch st { - case cacheLocal: - return u.layer.Open(name) - - case cacheMiss: - bfi, err := u.base.Stat(name) - if err != nil { - return nil, err - } - if bfi.IsDir() { - return u.base.Open(name) - } - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - - case cacheStale: - if !fi.IsDir() { - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - } - case cacheHit: - if !fi.IsDir() { - return u.layer.Open(name) - } - } - // the dirs from cacheHit, cacheStale fall down here: - bfile, _ := u.base.Open(name) - lfile, err := u.layer.Open(name) - if err != nil && bfile == nil { - return nil, err - } - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { - err := u.base.Mkdir(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache -} - -func (u *CacheOnReadFs) Name() string { - return "CacheOnReadFs" -} - -func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { - err := u.base.MkdirAll(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CacheOnReadFs) Create(name string) (File, error) { - bfh, err := u.base.Create(name) - if err != nil { - return nil, err - } - lfh, err := u.layer.Create(name) - if err != nil { - // oops, see comment about OS_TRUNC above, should we remove? 
then we have to - // remember if the file did not exist before - bfh.Close() - return nil, err - } - return &UnionFile{Base: bfh, Layer: lfh}, nil -} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go deleted file mode 100644 index 30855de572..0000000000 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly || zos -// +build aix darwin openbsd freebsd netbsd dragonfly zos - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go deleted file mode 100644 index 12792d21e2..0000000000 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix && !zos -// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix,!zos - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go deleted file mode 100644 index 184d6dd702..0000000000 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ /dev/null @@ -1,327 +0,0 @@ -package afero - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "time" -) - -var _ Lstater = (*CopyOnWriteFs)(nil) - -// The CopyOnWriteFs is a union filesystem: a read only base file system with -// a possibly writeable layer on top. Changes to the file system will only -// be made in the overlay: Changing an existing file in the base layer which -// is not present in the overlay will copy the file to the overlay ("changing" -// includes also calls to e.g. Chtimes(), Chmod() and Chown()). -// -// Reading directories is currently only supported via Open(), not OpenFile(). 
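-// -// Illustrative sketch (not from the original source): -// -//	ufs := NewCopyOnWriteFs(NewReadOnlyFs(NewOsFs()), NewMemMapFs()) -//	f, err := ufs.Create("notes.txt") // created in the overlay; the base stays untouched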
-type CopyOnWriteFs struct { - base Fs - layer Fs -} - -func NewCopyOnWriteFs(base Fs, layer Fs) Fs { - return &CopyOnWriteFs{base: base, layer: layer} -} - -// Returns true if the file is not in the overlay -func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { - if _, err := u.layer.Stat(name); err == nil { - return false, nil - } - _, err := u.base.Stat(name) - if err != nil { - if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { - return false, nil - } - } - if err == syscall.ENOENT { - return false, nil - } - } - return true, err -} - -func (u *CopyOnWriteFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chmod(name, mode) -} - -func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chown(name, uid, gid) -} - -func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { - fi, err := u.layer.Stat(name) - if err != nil { - isNotExist := u.isNotExist(err) - if isNotExist { - return u.base.Stat(name) - } - return nil, err - } - return fi, nil -} - -func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - llayer, ok1 := u.layer.(Lstater) - lbase, ok2 := u.base.(Lstater) - - if ok1 { - fi, b, err := llayer.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - - if !u.isNotExist(err) { - return nil, b, err - } - } - - if ok2 { - fi, b, err := lbase.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - if !u.isNotExist(err) { - return nil, b, err - } - } - - fi, err := u.Stat(name) - - return fi, false, err -} - -func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error { - if slayer, ok := u.layer.(Linker); ok { - return slayer.SymlinkIfPossible(oldname, newname) - } - - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) { - if rlayer, ok := u.layer.(LinkReader); ok { - return rlayer.ReadlinkIfPossible(name) - } - - if rbase, ok := u.base.(LinkReader); ok { - return rbase.ReadlinkIfPossible(name) - } - - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} - -func (u *CopyOnWriteFs) isNotExist(err error) bool { - if e, ok := err.(*os.PathError); ok { - err = e.Err - } - if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { - return true - } - return false -} - -// Renaming files present only in the base layer is not permitted -func (u *CopyOnWriteFs) Rename(oldname, newname string) error { - b, err := u.isBaseFile(oldname) - if err != nil { - return err - } - if b { - return syscall.EPERM - } - return u.layer.Rename(oldname, newname) -} - -// Removing files present only in the base layer is not permitted. 
If -// a file is present in the base layer and the overlay, only the overlay -// will be removed. -func (u *CopyOnWriteFs) Remove(name string) error { - err := u.layer.Remove(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) RemoveAll(name string) error { - err := u.layer.RemoveAll(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - if b { - if err = u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - dir := filepath.Dir(name) - isaDir, err := IsDir(u.base, dir) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if isaDir { - if err = u.layer.MkdirAll(dir, 0o777); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - isaDir, err = IsDir(u.layer, dir) - if err != nil { - return nil, err - } - if isaDir { - return u.layer.OpenFile(name, flag, perm) - } - - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? - } - if b { - return u.base.OpenFile(name, flag, perm) - } - return u.layer.OpenFile(name, flag, perm) -} - -// This function handles the 9 different possibilities caused -// by the union which are the intersection of the following... -// -// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory -func (u *CopyOnWriteFs) Open(name string) (File, error) { - // Since the overlay overrides the base we check that first - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - // If overlay doesn't exist, return the base (base state irrelevant) - if b { - return u.base.Open(name) - } - - // If overlay is a file, return it (base state irrelevant) - dir, err := IsDir(u.layer, name) - if err != nil { - return nil, err - } - if !dir { - return u.layer.Open(name) - } - - // Overlay is a directory, base state now matters. - // Base state has 3 states to check but 2 outcomes: - // A. It's a file or non-readable in the base (return just the overlay) - // B. It's an accessible directory in the base (return a UnionFile) - - // If base is file or nonreadable, return overlay - dir, err = IsDir(u.base, name) - if !dir || err != nil { - return u.layer.Open(name) - } - - // Both base & layer are directories - // Return union file (if opens are without error) - bfile, bErr := u.base.Open(name) - lfile, lErr := u.layer.Open(name) - - // If either have errors at this point something is very wrong. 
Return nil and the errors - if bErr != nil || lErr != nil { - return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) - } - - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - return ErrFileExists - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Name() string { - return "CopyOnWriteFs" -} - -func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - // This is in line with how os.MkdirAll behaves. - return nil - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o666) -} diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go deleted file mode 100644 index ac0de6d51f..0000000000 --- a/vendor/github.com/spf13/afero/httpFs.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "time" -) - -type httpDir struct { - basePath string - fs HttpFs -} - -func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) || - strings.Contains(name, "\x00") { - return nil, errors.New("http: invalid character in file path") - } - dir := string(d.basePath) - if dir == "" { - dir = "." 
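- // an empty base path falls back to the current directory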
- } - - f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -type HttpFs struct { - source Fs -} - -func NewHttpFs(source Fs) *HttpFs { - return &HttpFs{source: source} -} - -func (h HttpFs) Dir(s string) *httpDir { - return &httpDir{basePath: s, fs: h} -} - -func (h HttpFs) Name() string { return "h HttpFs" } - -func (h HttpFs) Create(name string) (File, error) { - return h.source.Create(name) -} - -func (h HttpFs) Chmod(name string, mode os.FileMode) error { - return h.source.Chmod(name, mode) -} - -func (h HttpFs) Chown(name string, uid, gid int) error { - return h.source.Chown(name, uid, gid) -} - -func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return h.source.Chtimes(name, atime, mtime) -} - -func (h HttpFs) Mkdir(name string, perm os.FileMode) error { - return h.source.Mkdir(name, perm) -} - -func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { - return h.source.MkdirAll(path, perm) -} - -func (h HttpFs) Open(name string) (http.File, error) { - f, err := h.source.Open(name) - if err == nil { - if httpfile, ok := f.(http.File); ok { - return httpfile, nil - } - } - return nil, err -} - -func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return h.source.OpenFile(name, flag, perm) -} - -func (h HttpFs) Remove(name string) error { - return h.source.Remove(name) -} - -func (h HttpFs) RemoveAll(path string) error { - return h.source.RemoveAll(path) -} - -func (h HttpFs) Rename(oldname, newname string) error { - return h.source.Rename(oldname, newname) -} - -func (h HttpFs) Stat(name string) (os.FileInfo, error) { - return h.source.Stat(name) -} diff --git a/vendor/github.com/spf13/afero/internal/common/adapters.go b/vendor/github.com/spf13/afero/internal/common/adapters.go deleted file mode 100644 index 60685caa54..0000000000 --- a/vendor/github.com/spf13/afero/internal/common/adapters.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2022 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import "io/fs" - -// FileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry -type FileInfoDirEntry struct { - fs.FileInfo -} - -var _ fs.DirEntry = FileInfoDirEntry{} - -func (d FileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } - -func (d FileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go deleted file mode 100644 index 938b9316e6..0000000000 --- a/vendor/github.com/spf13/afero/iofs.go +++ /dev/null @@ -1,298 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package afero - -import ( - "io" - "io/fs" - "os" - "path" - "sort" - "time" - - "github.com/spf13/afero/internal/common" -) - -// IOFS adopts afero.Fs to stdlib io/fs.FS -type IOFS struct { - Fs -} - -func NewIOFS(fs Fs) IOFS { - return IOFS{Fs: fs} -} - -var ( - _ fs.FS = IOFS{} - _ fs.GlobFS = IOFS{} - _ fs.ReadDirFS = IOFS{} - _ fs.ReadFileFS = IOFS{} - _ fs.StatFS = IOFS{} - _ fs.SubFS = IOFS{} -) - -func (iofs IOFS) Open(name string) (fs.File, error) { - const op = "open" - - // by convention for fs.FS implementations we should perform this check - if !fs.ValidPath(name) { - return nil, iofs.wrapError(op, name, fs.ErrInvalid) - } - - file, err := iofs.Fs.Open(name) - if err != nil { - return nil, iofs.wrapError(op, name, err) - } - - // file should implement fs.ReadDirFile - if _, ok := file.(fs.ReadDirFile); !ok { - file = readDirFile{file} - } - - return file, nil -} - -func (iofs IOFS) Glob(pattern string) ([]string, error) { - const op = "glob" - - // afero.Glob does not perform this check but it's required for implementations - if _, err := path.Match(pattern, ""); err != nil { - return nil, iofs.wrapError(op, pattern, err) - } - - items, err := Glob(iofs.Fs, pattern) - if err != nil { - return nil, iofs.wrapError(op, pattern, err) - } - - return items, nil -} - -func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) { - f, err := iofs.Fs.Open(name) - if err != nil { - return nil, iofs.wrapError("readdir", name, err) - } - - defer f.Close() - - if rdf, ok := f.(fs.ReadDirFile); ok { - items, err := rdf.ReadDir(-1) - if err != nil { - return nil, iofs.wrapError("readdir", name, err) - } - sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() }) - return items, nil - } - - items, err := f.Readdir(-1) - if err != nil { - return nil, iofs.wrapError("readdir", name, err) - } - sort.Sort(byName(items)) - - ret := make([]fs.DirEntry, len(items)) - for i := range items { - ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} - } - - return ret, nil -} - -func (iofs IOFS) ReadFile(name string) ([]byte, error) { - const op = "readfile" - - if !fs.ValidPath(name) { - return nil, iofs.wrapError(op, name, fs.ErrInvalid) - } - - bytes, err := ReadFile(iofs.Fs, name) - if err != nil { - return nil, iofs.wrapError(op, name, err) - } - - return bytes, nil -} - -func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil } - -func (IOFS) wrapError(op, path string, err error) error { - if _, ok := err.(*fs.PathError); ok { - return err // don't need to wrap again - } - - return &fs.PathError{ - Op: op, - Path: path, - Err: err, - } -} - -// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open -type readDirFile struct { - File -} - -var _ fs.ReadDirFile = readDirFile{} - -func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { - items, err := 
r.File.Readdir(n) - if err != nil { - return nil, err - } - - ret := make([]fs.DirEntry, len(items)) - for i := range items { - ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} - } - - return ret, nil -} - -// FromIOFS adopts io/fs.FS to use it as afero.Fs -// Note that io/fs.FS is read-only so all mutating methods will return fs.PathError with fs.ErrPermission -// To store modifications you may use afero.CopyOnWriteFs -type FromIOFS struct { - fs.FS -} - -var _ Fs = FromIOFS{} - -func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } - -func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } - -func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { - return notImplemented("mkdirall", path) -} - -func (f FromIOFS) Open(name string) (File, error) { - file, err := f.FS.Open(name) - if err != nil { - return nil, err - } - - return fromIOFSFile{File: file, name: name}, nil -} - -func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return f.Open(name) -} - -func (f FromIOFS) Remove(name string) error { - return notImplemented("remove", name) -} - -func (f FromIOFS) RemoveAll(path string) error { - return notImplemented("removeall", path) -} - -func (f FromIOFS) Rename(oldname, newname string) error { - return notImplemented("rename", oldname) -} - -func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) } - -func (f FromIOFS) Name() string { return "fromiofs" } - -func (f FromIOFS) Chmod(name string, mode os.FileMode) error { - return notImplemented("chmod", name) -} - -func (f FromIOFS) Chown(name string, uid, gid int) error { - return notImplemented("chown", name) -} - -func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error { - return notImplemented("chtimes", name) -} - -type fromIOFSFile struct { - fs.File - name string -} - -func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) { - readerAt, ok := f.File.(io.ReaderAt) - if !ok { - return -1, notImplemented("readat", f.name) - } - - return readerAt.ReadAt(p, off) -} - -func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) { - seeker, ok := f.File.(io.Seeker) - if !ok { - return -1, notImplemented("seek", f.name) - } - - return seeker.Seek(offset, whence) -} - -func (f fromIOFSFile) Write(p []byte) (n int, err error) { - return -1, notImplemented("write", f.name) -} - -func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) { - return -1, notImplemented("writeat", f.name) -} - -func (f fromIOFSFile) Name() string { return f.name } - -func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { - rdfile, ok := f.File.(fs.ReadDirFile) - if !ok { - return nil, notImplemented("readdir", f.name) - } - - entries, err := rdfile.ReadDir(count) - if err != nil { - return nil, err - } - - ret := make([]os.FileInfo, len(entries)) - for i := range entries { - ret[i], err = entries[i].Info() - - if err != nil { - return nil, err - } - } - - return ret, nil -} - -func (f fromIOFSFile) Readdirnames(n int) ([]string, error) { - rdfile, ok := f.File.(fs.ReadDirFile) - if !ok { - return nil, notImplemented("readdir", f.name) - } - - entries, err := rdfile.ReadDir(n) - if err != nil { - return nil, err - } - - ret := make([]string, len(entries)) - for i := range entries { - ret[i] = entries[i].Name() - } - - return ret, nil -} - -func (f fromIOFSFile) Sync() error { return nil } - -func (f fromIOFSFile) Truncate(size int64) 
error { - return notImplemented("truncate", f.name) -} - -func (f fromIOFSFile) WriteString(s string) (ret int, err error) { - return -1, notImplemented("writestring", f.name) -} - -func notImplemented(op, path string) error { - return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission} -} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go deleted file mode 100644 index fa6abe1eee..0000000000 --- a/vendor/github.com/spf13/afero/ioutil.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "sync" - "time" -) - -// byName implements sort.Interface. -type byName []os.FileInfo - -func (f byName) Len() int { return len(f) } -func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } -func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } - -// ReadDir reads the directory named by dirname and returns -// a list of sorted directory entries. -func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { - return ReadDir(a.Fs, dirname) -} - -func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - list, err := f.Readdir(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Sort(byName(list)) - return list, nil -} - -// ReadFile reads the file named by filename and returns the contents. -// A successful call returns err == nil, not err == EOF. Because ReadFile -// reads the whole file, it does not treat an EOF from Read as an error -// to be reported. -func (a Afero) ReadFile(filename string) ([]byte, error) { - return ReadFile(a.Fs, filename) -} - -func ReadFile(fs Fs, filename string) ([]byte, error) { - f, err := fs.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - // It's a good but not certain bet that FileInfo will tell us exactly how much to - // read, so let's try it but be prepared for the answer to be wrong. - var n int64 - - if fi, err := f.Stat(); err == nil { - // Don't preallocate a huge buffer, just in case. - if size := fi.Size(); size < 1e9 { - n = size - } - } - // As initial capacity for readAll, use n + a little extra in case Size is zero, - // and to avoid another allocation after Read has filled the buffer. The readAll - // call will read into its allocated internal buffer cheaply. If the size was - // wrong, we'll either waste some space off the end or reallocate as needed, but - // in the overwhelmingly common case we'll get it just right. - return readAll(f, n+bytes.MinRead) -} - -// readAll reads from r until an error or EOF and returns the data it read -// from the internal buffer allocated with a specified capacity. 
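The iofs.go file removed above was afero's bridge to the standard library's io/fs package. For reviewers, a minimal sketch of the round trip it enabled (using only the afero v1 API shown in this diff; the canonical import path is github.com/spf13/afero, which this vendor tree mirrors as github.com):

package main

import (
	"fmt"
	"io/fs"

	"github.com/spf13/afero"
)

func main() {
	// Build an in-memory afero filesystem, then view it as a stdlib fs.FS.
	mem := afero.NewMemMapFs()
	_ = afero.WriteFile(mem, "greeting.txt", []byte("hello"), 0o644)

	// IOFS satisfies fs.FS, fs.ReadDirFS, fs.ReadFileFS, etc., per the
	// interface assertions in the deleted file.
	var stdFS fs.FS = afero.NewIOFS(mem)
	data, err := fs.ReadFile(stdFS, "greeting.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // hello
}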
-func readAll(r io.Reader, capacity int64) (b []byte, err error) { - buf := bytes.NewBuffer(make([]byte, 0, capacity)) - // If the buffer overflows, we will get bytes.ErrTooLarge. - // Return that as an error. Any other panic remains. - defer func() { - e := recover() - if e == nil { - return - } - if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { - err = panicErr - } else { - panic(e) - } - }() - _, err = buf.ReadFrom(r) - return buf.Bytes(), err -} - -// ReadAll reads from r until an error or EOF and returns the data it read. -// A successful call returns err == nil, not err == EOF. Because ReadAll is -// defined to read from src until EOF, it does not treat an EOF from Read -// as an error to be reported. -func ReadAll(r io.Reader) ([]byte, error) { - return readAll(r, bytes.MinRead) -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it with permissions perm; -// otherwise WriteFile truncates it before writing. -func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { - return WriteFile(a.Fs, filename, data, perm) -} - -func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { - f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. -var ( - randNum uint32 - randmu sync.Mutex -) - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextRandom() string { - randmu.Lock() - r := randNum - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - randNum = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFile creates a new temporary file in the directory dir, -// opens the file for reading and writing, and returns the resulting *os.File. -// The filename is generated by taking pattern and adding a random -// string to the end. If pattern includes a "*", the random string -// replaces the last "*". -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. 
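As the doc comment above says, TempFile mirrors the stdlib behaviour but runs against any afero.Fs. A minimal usage sketch, assuming the API exactly as deleted here:

package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

func main() {
	fsys := afero.NewMemMapFs()
	// In "build-*.log" the random component replaces the last "*",
	// per the pattern rules documented above.
	f, err := afero.TempFile(fsys, "", "build-*.log")
	if err != nil {
		log.Fatal(err)
	}
	defer fsys.Remove(f.Name())
	fmt.Fprintln(f, "temp output")
	fmt.Println("created", f.Name())
}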
-func (a Afero) TempFile(dir, pattern string) (f File, err error) { - return TempFile(a.Fs, dir, pattern) -} - -func TempFile(fs Fs, dir, pattern string) (f File, err error) { - if dir == "" { - dir = os.TempDir() - } - - var prefix, suffix string - if pos := strings.LastIndex(pattern, "*"); pos != -1 { - prefix, suffix = pattern[:pos], pattern[pos+1:] - } else { - prefix = pattern - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextRandom()+suffix) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - randNum = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} - -// TempDir creates a new temporary directory in the directory dir -// with a name beginning with prefix and returns the path of the -// new directory. If dir is the empty string, TempDir uses the -// default directory for temporary files (see os.TempDir). -// Multiple programs calling TempDir simultaneously -// will not choose the same directory. It is the caller's responsibility -// to remove the directory when no longer needed. -func (a Afero) TempDir(dir, prefix string) (name string, err error) { - return TempDir(a.Fs, dir, prefix) -} - -func TempDir(fs Fs, dir, prefix string) (name string, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextRandom()) - err = fs.Mkdir(try, 0o700) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - randNum = reseed() - randmu.Unlock() - } - continue - } - if err == nil { - name = try - } - break - } - return -} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go deleted file mode 100644 index 89c1bfc0a7..0000000000 --- a/vendor/github.com/spf13/afero/lstater.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" -) - -// Lstater is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem. -// Else it will call Stat. -// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not. -type Lstater interface { - LstatIfPossible(name string) (os.FileInfo, bool, error) -} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go deleted file mode 100644 index 7db4b7de6e..0000000000 --- a/vendor/github.com/spf13/afero/match.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2009 The Go Authors. All rights reserved. - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "path/filepath" - "sort" - "strings" -) - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// This was adapted from (http://golang.org/pkg/path/filepath) and uses several -// built-ins from that package. -func Glob(fs Fs, pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - // Lstat not supported by all filesystems. - if _, err = lstatIfPossible(fs, pattern); err != nil { - return nil, nil - } - return []string{pattern}, nil - } - - dir, file := filepath.Split(pattern) - switch dir { - case "": - dir = "." - case string(filepath.Separator): - // nothing - default: - dir = dir[0 : len(dir)-1] // chop off trailing separator - } - - if !hasMeta(dir) { - return glob(fs, dir, file, nil) - } - - var m []string - m, err = Glob(fs, dir) - if err != nil { - return - } - for _, d := range m { - matches, err = glob(fs, d, file, matches) - if err != nil { - return - } - } - return -} - -// glob searches for files matching pattern in the directory dir -// and appends them to matches. If the directory cannot be -// opened, it returns the existing matches. New matches are -// added in lexicographical order. -func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { - m = matches - fi, err := fs.Stat(dir) - if err != nil { - return - } - if !fi.IsDir() { - return - } - d, err := fs.Open(dir) - if err != nil { - return - } - defer d.Close() - - names, _ := d.Readdirnames(-1) - sort.Strings(names) - - for _, n := range names { - matched, err := filepath.Match(pattern, n) - if err != nil { - return m, err - } - if matched { - m = append(m, filepath.Join(dir, n)) - } - } - return -} - -// hasMeta reports whether path contains any of the magic characters -// recognized by Match. -func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? - return strings.ContainsAny(path, "*?[") -} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go deleted file mode 100644 index e104013f45..0000000000 --- a/vendor/github.com/spf13/afero/mem/dir.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -type Dir interface { - Len() int - Names() []string - Files() []*FileData - Add(*FileData) - Remove(*FileData) -} - -func RemoveFromMemDir(dir *FileData, f *FileData) { - dir.memDir.Remove(f) -} - -func AddToMemDir(dir *FileData, f *FileData) { - dir.memDir.Add(f) -} - -func InitializeDir(d *FileData) { - if d.memDir == nil { - d.dir = true - d.memDir = &DirMap{} - } -} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go deleted file mode 100644 index 03a57ee5b5..0000000000 --- a/vendor/github.com/spf13/afero/mem/dirmap.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -import "sort" - -type DirMap map[string]*FileData - -func (m DirMap) Len() int { return len(m) } -func (m DirMap) Add(f *FileData) { m[f.name] = f } -func (m DirMap) Remove(f *FileData) { delete(m, f.name) } -func (m DirMap) Files() (files []*FileData) { - for _, f := range m { - files = append(files, f) - } - sort.Sort(filesSorter(files)) - return files -} - -// implement sort.Interface for []*FileData -type filesSorter []*FileData - -func (s filesSorter) Len() int { return len(s) } -func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } - -func (m DirMap) Names() (names []string) { - for x := range m { - names = append(names, x) - } - return names -} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go deleted file mode 100644 index 62fe4498e1..0000000000 --- a/vendor/github.com/spf13/afero/mem/file.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright © 2015 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
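mem.DirMap, deleted just above, is the name-sorted directory index that backs MemMapFs. A small sketch of its sorting contract (mem.CreateFile is defined in mem/file.go, the next file removed in this diff):

package main

import (
	"fmt"

	"github.com/spf13/afero/mem"
)

func main() {
	m := mem.DirMap{}
	m.Add(mem.CreateFile("b.txt")) // Add keys the entry by its FileData name
	m.Add(mem.CreateFile("a.txt"))
	for _, f := range m.Files() { // Files() returns entries sorted by name
		fmt.Println(f.Name()) // a.txt, then b.txt
	}
}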
- -package mem - -import ( - "bytes" - "errors" - "io" - "io/fs" - "os" - "path/filepath" - "sync" - "sync/atomic" - "time" - - "github.com/spf13/afero/internal/common" -) - -const FilePathSeparator = string(filepath.Separator) - -var _ fs.ReadDirFile = &File{} - -type File struct { - // atomic requires 64-bit alignment for struct field access - at int64 - readDirCount int64 - closed bool - readOnly bool - fileData *FileData -} - -func NewFileHandle(data *FileData) *File { - return &File{fileData: data} -} - -func NewReadOnlyFileHandle(data *FileData) *File { - return &File{fileData: data, readOnly: true} -} - -func (f File) Data() *FileData { - return f.fileData -} - -type FileData struct { - sync.Mutex - name string - data []byte - memDir Dir - dir bool - mode os.FileMode - modtime time.Time - uid int - gid int -} - -func (d *FileData) Name() string { - d.Lock() - defer d.Unlock() - return d.name -} - -func CreateFile(name string) *FileData { - return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} -} - -func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true, modtime: time.Now()} -} - -func ChangeFileName(f *FileData, newname string) { - f.Lock() - f.name = newname - f.Unlock() -} - -func SetMode(f *FileData, mode os.FileMode) { - f.Lock() - f.mode = mode - f.Unlock() -} - -func SetModTime(f *FileData, mtime time.Time) { - f.Lock() - setModTime(f, mtime) - f.Unlock() -} - -func setModTime(f *FileData, mtime time.Time) { - f.modtime = mtime -} - -func SetUID(f *FileData, uid int) { - f.Lock() - f.uid = uid - f.Unlock() -} - -func SetGID(f *FileData, gid int) { - f.Lock() - f.gid = gid - f.Unlock() -} - -func GetFileInfo(f *FileData) *FileInfo { - return &FileInfo{f} -} - -func (f *File) Open() error { - atomic.StoreInt64(&f.at, 0) - atomic.StoreInt64(&f.readDirCount, 0) - f.fileData.Lock() - f.closed = false - f.fileData.Unlock() - return nil -} - -func (f *File) Close() error { - f.fileData.Lock() - f.closed = true - if !f.readOnly { - setModTime(f.fileData, time.Now()) - } - f.fileData.Unlock() - return nil -} - -func (f *File) Name() string { - return f.fileData.Name() -} - -func (f *File) Stat() (os.FileInfo, error) { - return &FileInfo{f.fileData}, nil -} - -func (f *File) Sync() error { - return nil -} - -func (f *File) Readdir(count int) (res []os.FileInfo, err error) { - if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} - } - var outLength int64 - - f.fileData.Lock() - files := f.fileData.memDir.Files()[f.readDirCount:] - if count > 0 { - if len(files) < count { - outLength = int64(len(files)) - } else { - outLength = int64(count) - } - if len(files) == 0 { - err = io.EOF - } - } else { - outLength = int64(len(files)) - } - f.readDirCount += outLength - f.fileData.Unlock() - - res = make([]os.FileInfo, outLength) - for i := range res { - res[i] = &FileInfo{files[i]} - } - - return res, err -} - -func (f *File) Readdirnames(n int) (names []string, err error) { - fi, err := f.Readdir(n) - names = make([]string, len(fi)) - for i, f := range fi { - _, names[i] = filepath.Split(f.Name()) - } - return names, err -} - -// Implements fs.ReadDirFile -func (f *File) ReadDir(n int) ([]fs.DirEntry, error) { - fi, err := f.Readdir(n) - if err != nil { - return nil, err - } - di := make([]fs.DirEntry, len(fi)) - for i, f := range fi { - di[i] = common.FileInfoDirEntry{FileInfo: f} - } - return di, nil -} - -func (f *File) Read(b []byte) (n int, err error) { 
- f.fileData.Lock() - defer f.fileData.Unlock() - if f.closed { - return 0, ErrFileClosed - } - if len(b) > 0 && int(f.at) == len(f.fileData.data) { - return 0, io.EOF - } - if int(f.at) > len(f.fileData.data) { - return 0, io.ErrUnexpectedEOF - } - if len(f.fileData.data)-int(f.at) >= len(b) { - n = len(b) - } else { - n = len(f.fileData.data) - int(f.at) - } - copy(b, f.fileData.data[f.at:f.at+int64(n)]) - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - prev := atomic.LoadInt64(&f.at) - atomic.StoreInt64(&f.at, off) - n, err = f.Read(b) - atomic.StoreInt64(&f.at, prev) - return -} - -func (f *File) Truncate(size int64) error { - if f.closed { - return ErrFileClosed - } - if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - if size < 0 { - return ErrOutOfRange - } - f.fileData.Lock() - defer f.fileData.Unlock() - if size > int64(len(f.fileData.data)) { - diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{0o0}, int(diff))...) - } else { - f.fileData.data = f.fileData.data[0:size] - } - setModTime(f.fileData, time.Now()) - return nil -} - -func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed { - return 0, ErrFileClosed - } - switch whence { - case io.SeekStart: - atomic.StoreInt64(&f.at, offset) - case io.SeekCurrent: - atomic.AddInt64(&f.at, offset) - case io.SeekEnd: - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) - } - return f.at, nil -} - -func (f *File) Write(b []byte) (n int, err error) { - if f.closed { - return 0, ErrFileClosed - } - if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - n = len(b) - cur := atomic.LoadInt64(&f.at) - f.fileData.Lock() - defer f.fileData.Unlock() - diff := cur - int64(len(f.fileData.data)) - var tail []byte - if n+int(cur) < len(f.fileData.data) { - tail = f.fileData.data[n+int(cur):] - } - if diff > 0 { - f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) - f.fileData.data = append(f.fileData.data, tail...) - } else { - f.fileData.data = append(f.fileData.data[:cur], b...) - f.fileData.data = append(f.fileData.data, tail...) 
- } - setModTime(f.fileData, time.Now()) - - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return f.Write(b) -} - -func (f *File) WriteString(s string) (ret int, err error) { - return f.Write([]byte(s)) -} - -func (f *File) Info() *FileInfo { - return &FileInfo{f.fileData} -} - -type FileInfo struct { - *FileData -} - -// Implements os.FileInfo -func (s *FileInfo) Name() string { - s.Lock() - _, name := filepath.Split(s.name) - s.Unlock() - return name -} - -func (s *FileInfo) Mode() os.FileMode { - s.Lock() - defer s.Unlock() - return s.mode -} - -func (s *FileInfo) ModTime() time.Time { - s.Lock() - defer s.Unlock() - return s.modtime -} - -func (s *FileInfo) IsDir() bool { - s.Lock() - defer s.Unlock() - return s.dir -} -func (s *FileInfo) Sys() interface{} { return nil } -func (s *FileInfo) Size() int64 { - if s.IsDir() { - return int64(42) - } - s.Lock() - defer s.Unlock() - return int64(len(s.data)) -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("out of range") - ErrTooLarge = errors.New("too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go deleted file mode 100644 index d6c744e8d5..0000000000 --- a/vendor/github.com/spf13/afero/memmap.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "fmt" - "io" - - "log" - "os" - "path/filepath" - - "sort" - "strings" - "sync" - "time" - - "github.com/spf13/afero/mem" -) - -const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod() - -type MemMapFs struct { - mu sync.RWMutex - data map[string]*mem.FileData - init sync.Once -} - -func NewMemMapFs() Fs { - return &MemMapFs{} -} - -func (m *MemMapFs) getData() map[string]*mem.FileData { - m.init.Do(func() { - m.data = make(map[string]*mem.FileData) - // Root should always exist, right? - // TODO: what about windows? 
- root := mem.CreateDir(FilePathSeparator) - mem.SetMode(root, os.ModeDir|0o755) - m.data[FilePathSeparator] = root - }) - return m.data -} - -func (*MemMapFs) Name() string { return "MemMapFS" } - -func (m *MemMapFs) Create(name string) (File, error) { - name = normalizePath(name) - m.mu.Lock() - file := mem.CreateFile(name) - m.getData()[name] = file - m.registerWithParent(file, 0) - m.mu.Unlock() - return mem.NewFileHandle(file), nil -} - -func (m *MemMapFs) unRegisterWithParent(fileName string) error { - f, err := m.lockfreeOpen(fileName) - if err != nil { - return err - } - parent := m.findParent(f) - if parent == nil { - log.Panic("parent of ", f.Name(), " is nil") - } - - parent.Lock() - mem.RemoveFromMemDir(parent, f) - parent.Unlock() - return nil -} - -func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { - pdir, _ := filepath.Split(f.Name()) - pdir = filepath.Clean(pdir) - pfile, err := m.lockfreeOpen(pdir) - if err != nil { - return nil - } - return pfile -} - -func (m *MemMapFs) findDescendants(name string) []*mem.FileData { - fData := m.getData() - descendants := make([]*mem.FileData, 0, len(fData)) - for p, dFile := range fData { - if strings.HasPrefix(p, name+FilePathSeparator) { - descendants = append(descendants, dFile) - } - } - - sort.Slice(descendants, func(i, j int) bool { - cur := len(strings.Split(descendants[i].Name(), FilePathSeparator)) - next := len(strings.Split(descendants[j].Name(), FilePathSeparator)) - return cur < next - }) - - return descendants -} - -func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { - if f == nil { - return - } - parent := m.findParent(f) - if parent == nil { - pdir := filepath.Dir(filepath.Clean(f.Name())) - err := m.lockfreeMkdir(pdir, perm) - if err != nil { - // log.Println("Mkdir error:", err) - return - } - parent, err = m.lockfreeOpen(pdir) - if err != nil { - // log.Println("Open after Mkdir error:", err) - return - } - } - - parent.Lock() - mem.InitializeDir(parent) - mem.AddToMemDir(parent, f) - parent.Unlock() -} - -func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - x, ok := m.getData()[name] - if ok { - // Only return ErrFileExists if it's a file, not a directory. - i := mem.FileInfo{FileData: x} - if !i.IsDir() { - return ErrFileExists - } - } else { - item := mem.CreateDir(name) - mem.SetMode(item, os.ModeDir|perm) - m.getData()[name] = item - m.registerWithParent(item, perm) - } - return nil -} - -func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { - perm &= chmodBits - name = normalizePath(name) - - m.mu.RLock() - _, ok := m.getData()[name] - m.mu.RUnlock() - if ok { - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - - m.mu.Lock() - // Double check that it doesn't exist. 
- if _, ok := m.getData()[name]; ok { - m.mu.Unlock() - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - item := mem.CreateDir(name) - mem.SetMode(item, os.ModeDir|perm) - m.getData()[name] = item - m.registerWithParent(item, perm) - m.mu.Unlock() - - return m.setFileMode(name, perm|os.ModeDir) -} - -func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { - err := m.Mkdir(path, perm) - if err != nil { - if err.(*os.PathError).Err == ErrFileExists { - return nil - } - return err - } - return nil -} - -// Handle some relative paths -func normalizePath(path string) string { - path = filepath.Clean(path) - - switch path { - case ".": - return FilePathSeparator - case "..": - return FilePathSeparator - default: - return path - } -} - -func (m *MemMapFs) Open(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewReadOnlyFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) openWrite(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) open(name string) (*mem.FileData, error) { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} - } - return f, nil -} - -func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { - name = normalizePath(name) - f, ok := m.getData()[name] - if ok { - return f, nil - } else { - return nil, ErrFileNotFound - } -} - -func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - perm &= chmodBits - chmod := false - file, err := m.openWrite(name) - if err == nil && (flag&os.O_EXCL > 0) { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists} - } - if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { - file, err = m.Create(name) - chmod = true - } - if err != nil { - return nil, err - } - if flag == os.O_RDONLY { - file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) - } - if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, io.SeekEnd) - if err != nil { - file.Close() - return nil, err - } - } - if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { - err = file.Truncate(0) - if err != nil { - file.Close() - return nil, err - } - } - if chmod { - return file, m.setFileMode(name, perm) - } - return file, nil -} - -func (m *MemMapFs) Remove(name string) error { - name = normalizePath(name) - - m.mu.Lock() - defer m.mu.Unlock() - - if _, ok := m.getData()[name]; ok { - err := m.unRegisterWithParent(name) - if err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - delete(m.getData(), name) - } else { - return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} - } - return nil -} - -func (m *MemMapFs) RemoveAll(path string) error { - path = normalizePath(path) - m.mu.Lock() - m.unRegisterWithParent(path) - m.mu.Unlock() - - m.mu.RLock() - defer m.mu.RUnlock() - - for p := range m.getData() { - if p == path || strings.HasPrefix(p, path+FilePathSeparator) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - m.mu.Unlock() - m.mu.RLock() - } - } - return nil -} - -func (m *MemMapFs) Rename(oldname, newname string) error { - oldname = normalizePath(oldname) - newname = normalizePath(newname) - - if oldname == newname { - return nil - } - - m.mu.RLock() - defer m.mu.RUnlock() - if _, ok := m.getData()[oldname]; ok { - m.mu.RUnlock() - m.mu.Lock() - err := m.unRegisterWithParent(oldname) - 
if err != nil { - return err - } - - fileData := m.getData()[oldname] - mem.ChangeFileName(fileData, newname) - m.getData()[newname] = fileData - - err = m.renameDescendants(oldname, newname) - if err != nil { - return err - } - - delete(m.getData(), oldname) - - m.registerWithParent(fileData, 0) - m.mu.Unlock() - m.mu.RLock() - } else { - return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} - } - return nil -} - -func (m *MemMapFs) renameDescendants(oldname, newname string) error { - descendants := m.findDescendants(oldname) - removes := make([]string, 0, len(descendants)) - for _, desc := range descendants { - descNewName := strings.Replace(desc.Name(), oldname, newname, 1) - err := m.unRegisterWithParent(desc.Name()) - if err != nil { - return err - } - - removes = append(removes, desc.Name()) - mem.ChangeFileName(desc, descNewName) - m.getData()[descNewName] = desc - - m.registerWithParent(desc, 0) - } - for _, r := range removes { - delete(m.getData(), r) - } - - return nil -} - -func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fileInfo, err := m.Stat(name) - return fileInfo, false, err -} - -func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { - f, err := m.Open(name) - if err != nil { - return nil, err - } - fi := mem.GetFileInfo(f.(*mem.File).Data()) - return fi, nil -} - -func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { - mode &= chmodBits - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits - - mode = prevOtherBits | mode - return m.setFileMode(name, mode) -} - -func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetMode(f, mode) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) Chown(name string, uid, gid int) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound} - } - - mem.SetUID(f, uid) - mem.SetGID(f, gid) - - return nil -} - -func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetModTime(f, mtime) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) List() { - for _, x := range m.data { - y := mem.FileInfo{FileData: x} - fmt.Println(x.Name(), y.Size()) - } -} diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go deleted file mode 100644 index f1366321ec..0000000000 --- a/vendor/github.com/spf13/afero/os.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "time" -) - -var _ Lstater = (*OsFs)(nil) - -// OsFs is a Fs implementation that uses functions provided by the os package. -// -// For details in any method, check the documentation of the os package -// (http://golang.org/pkg/os/). -type OsFs struct{} - -func NewOsFs() Fs { - return &OsFs{} -} - -func (OsFs) Name() string { return "OsFs" } - -func (OsFs) Create(name string) (File, error) { - f, e := os.Create(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(name, perm) -} - -func (OsFs) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -func (OsFs) Open(name string) (File, error) { - f, e := os.Open(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, e := os.OpenFile(name, flag, perm) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Remove(name string) error { - return os.Remove(name) -} - -func (OsFs) RemoveAll(path string) error { - return os.RemoveAll(path) -} - -func (OsFs) Rename(oldname, newname string) error { - return os.Rename(oldname, newname) -} - -func (OsFs) Stat(name string) (os.FileInfo, error) { - return os.Stat(name) -} - -func (OsFs) Chmod(name string, mode os.FileMode) error { - return os.Chmod(name, mode) -} - -func (OsFs) Chown(name string, uid, gid int) error { - return os.Chown(name, uid, gid) -} - -func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(name, atime, mtime) -} - -func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fi, err := os.Lstat(name) - return fi, true, err -} - -func (OsFs) SymlinkIfPossible(oldname, newname string) error { - return os.Symlink(oldname, newname) -} - -func (OsFs) ReadlinkIfPossible(name string) (string, error) { - return os.Readlink(name) -} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go deleted file mode 100644 index 18f60a0f6b..0000000000 --- a/vendor/github.com/spf13/afero/path.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "path/filepath" - "sort" -) - -// readDirNames reads the directory named by dirname and returns -// a sorted list of directory entries. -// adapted from https://golang.org/src/path/filepath/path.go -func readDirNames(fs Fs, dirname string) ([]string, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - names, err := f.Readdirnames(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// walk recursively descends path, calling walkFn -// adapted from https://golang.org/src/path/filepath/path.go -func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { - err := walkFn(path, info, nil) - if err != nil { - if info.IsDir() && err == filepath.SkipDir { - return nil - } - return err - } - - if !info.IsDir() { - return nil - } - - names, err := readDirNames(fs, path) - if err != nil { - return walkFn(path, info, err) - } - - for _, name := range names { - filename := filepath.Join(path, name) - fileInfo, err := lstatIfPossible(fs, filename) - if err != nil { - if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { - return err - } - } else { - err = walk(fs, filename, fileInfo, walkFn) - if err != nil { - if !fileInfo.IsDir() || err != filepath.SkipDir { - return err - } - } - } - } - return nil -} - -// if the filesystem supports it, use Lstat, else use fs.Stat -func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { - if lfs, ok := fs.(Lstater); ok { - fi, _, err := lfs.LstatIfPossible(path) - return fi, err - } - return fs.Stat(path) -} - -// Walk walks the file tree rooted at root, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. The files are walked in lexical -// order, which makes the output deterministic but means that for very -// large directories Walk can be inefficient. -// Walk does not follow symbolic links. 
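Walk, whose contract the comment above spells out, is filepath.Walk generalized over an afero.Fs. A usage sketch under that API:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fsys := afero.NewMemMapFs()
	_ = afero.WriteFile(fsys, "/srv/a.txt", []byte("x"), 0o644)
	_ = afero.WriteFile(fsys, "/srv/b/c.txt", []byte("y"), 0o644)

	// Lexical-order walk; symbolic links are not followed, as documented above.
	_ = afero.Walk(fsys, "/", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, info.IsDir())
		return nil
	})
}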
- -func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { - return Walk(a.Fs, root, walkFn) -} - -func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { - info, err := lstatIfPossible(fs, root) - if err != nil { - return walkFn(root, nil, err) - } - return walk(fs, root, info, walkFn) -} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go deleted file mode 100644 index bd8f9264dd..0000000000 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ /dev/null @@ -1,96 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -var _ Lstater = (*ReadOnlyFs)(nil) - -type ReadOnlyFs struct { - source Fs -} - -func NewReadOnlyFs(source Fs) Fs { - return &ReadOnlyFs{source: source} -} - -func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { - return ReadDir(r.source, name) -} - -func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chown(n string, uid, gid int) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Name() string { - return "ReadOnlyFilter" -} - -func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { - return r.source.Stat(name) -} - -func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - if lsf, ok := r.source.(Lstater); ok { - return lsf.LstatIfPossible(name) - } - fi, err := r.Stat(name) - return fi, false, err -} - -func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) { - if srdr, ok := r.source.(LinkReader); ok { - return srdr.ReadlinkIfPossible(name) - } - - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} - -func (r *ReadOnlyFs) Rename(o, n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) RemoveAll(p string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Remove(n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - return nil, syscall.EPERM - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *ReadOnlyFs) Open(n string) (File, error) { - return r.source.Open(n) -} - -func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Create(n string) (File, error) { - return nil, syscall.EPERM -} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go deleted file mode 100644 index 218f3b235b..0000000000 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ /dev/null @@ -1,223 +0,0 @@ -package afero - -import ( - "os" - "regexp" - "syscall" - "time" -) - -// The RegexpFs filters files (not directories) by regular expression. Only -// files matching the given regexp will be allowed, all others get an ENOENT error ( -// "No such file or directory"). 
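ReadOnlyFs, removed just above, passes reads through to its source and rejects every mutating call with syscall.EPERM. A sketch:

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	_ = afero.WriteFile(base, "cfg.yaml", []byte("a: 1"), 0o644)

	ro := afero.NewReadOnlyFs(base)
	if _, err := ro.Create("new.txt"); err != nil {
		fmt.Println("write blocked:", err) // syscall.EPERM ("operation not permitted")
	}
	data, _ := afero.ReadFile(ro, "cfg.yaml") // reads pass through to base
	fmt.Println(string(data))
}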
-type RegexpFs struct { - re *regexp.Regexp - source Fs -} - -func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { - return &RegexpFs{source: source, re: re} -} - -type RegexpFile struct { - f File - re *regexp.Regexp -} - -func (r *RegexpFs) matchesName(name string) error { - if r.re == nil { - return nil - } - if r.re.MatchString(name) { - return nil - } - return syscall.ENOENT -} - -func (r *RegexpFs) dirOrMatches(name string) error { - dir, err := IsDir(r.source, name) - if err != nil { - return err - } - if dir { - return nil - } - return r.matchesName(name) -} - -func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chtimes(name, a, m) -} - -func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chmod(name, mode) -} - -func (r *RegexpFs) Chown(name string, uid, gid int) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chown(name, uid, gid) -} - -func (r *RegexpFs) Name() string { - return "RegexpFs" -} - -func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.Stat(name) -} - -func (r *RegexpFs) Rename(oldname, newname string) error { - dir, err := IsDir(r.source, oldname) - if err != nil { - return err - } - if dir { - return nil - } - if err := r.matchesName(oldname); err != nil { - return err - } - if err := r.matchesName(newname); err != nil { - return err - } - return r.source.Rename(oldname, newname) -} - -func (r *RegexpFs) RemoveAll(p string) error { - dir, err := IsDir(r.source, p) - if err != nil { - return err - } - if !dir { - if err := r.matchesName(p); err != nil { - return err - } - } - return r.source.RemoveAll(p) -} - -func (r *RegexpFs) Remove(name string) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Remove(name) -} - -func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *RegexpFs) Open(name string) (File, error) { - dir, err := IsDir(r.source, name) - if err != nil { - return nil, err - } - if !dir { - if err := r.matchesName(name); err != nil { - return nil, err - } - } - f, err := r.source.Open(name) - if err != nil { - return nil, err - } - return &RegexpFile{f: f, re: r.re}, nil -} - -func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { - return r.source.Mkdir(n, p) -} - -func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { - return r.source.MkdirAll(n, p) -} - -func (r *RegexpFs) Create(name string) (File, error) { - if err := r.matchesName(name); err != nil { - return nil, err - } - return r.source.Create(name) -} - -func (f *RegexpFile) Close() error { - return f.f.Close() -} - -func (f *RegexpFile) Read(s []byte) (int, error) { - return f.f.Read(s) -} - -func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { - return f.f.ReadAt(s, o) -} - -func (f *RegexpFile) Seek(o int64, w int) (int64, error) { - return f.f.Seek(o, w) -} - -func (f *RegexpFile) Write(s []byte) (int, error) { - return f.f.Write(s) -} - -func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { - return f.f.WriteAt(s, o) -} - -func (f *RegexpFile) Name() string { - return f.f.Name() -} - -func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { 
- var rfi []os.FileInfo - rfi, err = f.f.Readdir(c) - if err != nil { - return nil, err - } - for _, i := range rfi { - if i.IsDir() || f.re.MatchString(i.Name()) { - fi = append(fi, i) - } - } - return fi, nil -} - -func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { - fi, err := f.Readdir(c) - if err != nil { - return nil, err - } - for _, s := range fi { - n = append(n, s.Name()) - } - return n, nil -} - -func (f *RegexpFile) Stat() (os.FileInfo, error) { - return f.f.Stat() -} - -func (f *RegexpFile) Sync() error { - return f.f.Sync() -} - -func (f *RegexpFile) Truncate(s int64) error { - return f.f.Truncate(s) -} - -func (f *RegexpFile) WriteString(s string) (int, error) { - return f.f.WriteString(s) -} diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go deleted file mode 100644 index aa6ae125b6..0000000000 --- a/vendor/github.com/spf13/afero/symlink.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" -) - -// Symlinker is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It indicates support for 3 symlink related interfaces that implement the -// behaviors of the os methods: -// - Lstat -// - Symlink, and -// - Readlink -type Symlinker interface { - Lstater - Linker - LinkReader -} - -// Linker is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem, -// or the filesystem otherwise supports symlinks. -type Linker interface { - SymlinkIfPossible(oldname, newname string) error -} - -// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system -// does not support symlinks either directly or through its delegated filesystem. -// As expressed by support for the Linker interface. -var ErrNoSymlink = errors.New("symlink not supported") - -// LinkReader is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -type LinkReader interface { - ReadlinkIfPossible(name string) (string, error) -} - -// ErrNoReadlink is the error that will be wrapped in an os.PathError if a file system -// does not support the readlink operation either directly or through its delegated filesystem. -// As expressed by support for the LinkReader interface. 
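The optional Lstater, Linker, and LinkReader interfaces above let callers probe a filesystem for symlink support at runtime; the deleted path.go uses exactly this pattern in its lstatIfPossible helper. A sketch of the probe:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

// lstat prefers Lstat when the Fs advertises it via the optional Lstater
// interface (see lstater.go earlier in this diff) and falls back to Stat.
func lstat(fsys afero.Fs, name string) (os.FileInfo, bool, error) {
	if lfs, ok := fsys.(afero.Lstater); ok {
		return lfs.LstatIfPossible(name)
	}
	fi, err := fsys.Stat(name)
	return fi, false, err
}

func main() {
	// OsFs implements Lstater, so the boolean reports that Lstat was used.
	fi, usedLstat, err := lstat(afero.NewOsFs(), os.TempDir())
	if err == nil {
		fmt.Println(fi.Name(), "lstat used:", usedLstat)
	}
}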
-var ErrNoReadlink = errors.New("readlink not supported") diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go deleted file mode 100644 index 62dd6c93c8..0000000000 --- a/vendor/github.com/spf13/afero/unionFile.go +++ /dev/null @@ -1,330 +0,0 @@ -package afero - -import ( - "io" - "os" - "path/filepath" - "syscall" -) - -// The UnionFile implements the afero.File interface and will be returned -// when reading a directory present at least in the overlay or opening a file -// for writing. -// -// The calls to -// Readdir() and Readdirnames() merge the file os.FileInfo / names from the -// base and the overlay - for files present in both layers, only those -// from the overlay will be used. -// -// When opening files for writing (Create() / OpenFile() with the right flags) -// the operations will be done in both layers, starting with the overlay. A -// successful read in the overlay will move the cursor position in the base layer -// by the number of bytes read. -type UnionFile struct { - Base File - Layer File - Merger DirsMerger - off int - files []os.FileInfo -} - -func (f *UnionFile) Close() error { - // first close base, so we have a newer timestamp in the overlay. If we'd close - // the overlay first, we'd get a cacheStale the next time we access this file - // -> cache would be useless ;-) - if f.Base != nil { - f.Base.Close() - } - if f.Layer != nil { - return f.Layer.Close() - } - return BADFD -} - -func (f *UnionFile) Read(s []byte) (int, error) { - if f.Layer != nil { - n, err := f.Layer.Read(s) - if (err == nil || err == io.EOF) && f.Base != nil { - // advance the file position also in the base file, the next - // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), io.SeekCurrent); seekErr != nil { - // only overwrite err in case the seek fails: we need to - // report an eventual io.EOF to the caller - err = seekErr - } - } - return n, err - } - if f.Base != nil { - return f.Base.Read(s) - } - return 0, BADFD -} - -func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { - if f.Layer != nil { - n, err := f.Layer.ReadAt(s, o) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), io.SeekStart) - } - return n, err - } - if f.Base != nil { - return f.Base.ReadAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { - if f.Layer != nil { - pos, err = f.Layer.Seek(o, w) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o, w) - } - return pos, err - } - if f.Base != nil { - return f.Base.Seek(o, w) - } - return 0, BADFD -} - -func (f *UnionFile) Write(s []byte) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? - _, err = f.Base.Write(s) - } - return n, err - } - if f.Base != nil { - return f.Base.Write(s) - } - return 0, BADFD -} - -func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteAt(s, o) - if err == nil && f.Base != nil { - _, err = f.Base.WriteAt(s, o) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Name() string { - if f.Layer != nil { - return f.Layer.Name() - } - return f.Base.Name() -} - -// DirsMerger is how UnionFile weaves two directories together. 
-// It takes the FileInfo slices from the layer and the base and returns a -// single view. -type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) - -var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - files := make(map[string]os.FileInfo) - - for _, fi := range lofi { - files[fi.Name()] = fi - } - - for _, fi := range bofi { - if _, exists := files[fi.Name()]; !exists { - files[fi.Name()] = fi - } - } - - rfi := make([]os.FileInfo, len(files)) - - i := 0 - for _, fi := range files { - rfi[i] = fi - i++ - } - - return rfi, nil -} - -// Readdir will weave the two directories together and -// return a single view of the overlayed directories. -// At the end of the directory view, the error is io.EOF if c > 0. -func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger - if merge == nil { - merge = defaultUnionMergeDirsFn - } - - if f.off == 0 { - var lfi []os.FileInfo - if f.Layer != nil { - lfi, err = f.Layer.Readdir(-1) - if err != nil { - return nil, err - } - } - - var bfi []os.FileInfo - if f.Base != nil { - bfi, err = f.Base.Readdir(-1) - if err != nil { - return nil, err - } - - } - merged, err := merge(lfi, bfi) - if err != nil { - return nil, err - } - f.files = append(f.files, merged...) - } - files := f.files[f.off:] - - if c <= 0 { - return files, nil - } - - if len(files) == 0 { - return nil, io.EOF - } - - if c > len(files) { - c = len(files) - } - - defer func() { f.off += c }() - return files[:c], nil -} - -func (f *UnionFile) Readdirnames(c int) ([]string, error) { - rfi, err := f.Readdir(c) - if err != nil { - return nil, err - } - var names []string - for _, fi := range rfi { - names = append(names, fi.Name()) - } - return names, nil -} - -func (f *UnionFile) Stat() (os.FileInfo, error) { - if f.Layer != nil { - return f.Layer.Stat() - } - if f.Base != nil { - return f.Base.Stat() - } - return nil, BADFD -} - -func (f *UnionFile) Sync() (err error) { - if f.Layer != nil { - err = f.Layer.Sync() - if err == nil && f.Base != nil { - err = f.Base.Sync() - } - return err - } - if f.Base != nil { - return f.Base.Sync() - } - return BADFD -} - -func (f *UnionFile) Truncate(s int64) (err error) { - if f.Layer != nil { - err = f.Layer.Truncate(s) - if err == nil && f.Base != nil { - err = f.Base.Truncate(s) - } - return err - } - if f.Base != nil { - return f.Base.Truncate(s) - } - return BADFD -} - -func (f *UnionFile) WriteString(s string) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteString(s) - if err == nil && f.Base != nil { - _, err = f.Base.WriteString(s) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteString(s) - } - return 0, BADFD -} - -func copyFile(base Fs, layer Fs, name string, bfh File) error { - // First make sure the directory exists - exists, err := Exists(layer, filepath.Dir(name)) - if err != nil { - return err - } - if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0o777) // FIXME? 
- if err != nil { - return err - } - } - - // Create the file on the overlay - lfh, err := layer.Create(name) - if err != nil { - return err - } - n, err := io.Copy(lfh, bfh) - if err != nil { - // If anything fails, clean up the file - layer.Remove(name) - lfh.Close() - return err - } - - bfi, err := bfh.Stat() - if err != nil || bfi.Size() != n { - layer.Remove(name) - lfh.Close() - return syscall.EIO - } - - err = lfh.Close() - if err != nil { - layer.Remove(name) - lfh.Close() - return err - } - return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) -} - -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - - return copyFile(base, layer, name, bfh) -} - -func copyFileToLayer(base Fs, layer Fs, name string, flag int, perm os.FileMode) error { - bfh, err := base.OpenFile(name, flag, perm) - if err != nil { - return err - } - defer bfh.Close() - - return copyFile(base, layer, name, bfh) -} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go deleted file mode 100644 index 9e4cba2746..0000000000 --- a/vendor/github.com/spf13/afero/util.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright ©2015 Steve Francia -// Portions Copyright ©2015 The Hugo Authors -// Portions Copyright 2016-present Bjørn Erik Pedersen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "unicode" - - "golang.org/x/text/runes" - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" -) - -// Filepath separator defined by os.Separator. -const FilePathSeparator = string(filepath.Separator) - -// Takes a reader and a path and writes the content -func (a Afero) WriteReader(path string, r io.Reader) (err error) { - return WriteReader(a.Fs, path, r) -} - -func WriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r - if err != nil { - if err != os.ErrExist { - return err - } - } - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -// Same as WriteReader but checks to see if file/directory already exists. 
-func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { - return SafeWriteReader(a.Fs, path, r) -} - -func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r - if err != nil { - return - } - } - - exists, err := Exists(fs, path) - if err != nil { - return - } - if exists { - return fmt.Errorf("%v already exists", path) - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -func (a Afero) GetTempDir(subPath string) string { - return GetTempDir(a.Fs, subPath) -} - -// GetTempDir returns the default temp directory with trailing slash -// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx -func GetTempDir(fs Fs, subPath string) string { - addSlash := func(p string) string { - if FilePathSeparator != p[len(p)-1:] { - p = p + FilePathSeparator - } - return p - } - dir := addSlash(os.TempDir()) - - if subPath != "" { - // preserve windows backslash :-( - if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) - } - dir = dir + UnicodeSanitize((subPath)) - if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) - } - - if exists, _ := Exists(fs, dir); exists { - return addSlash(dir) - } - - err := fs.MkdirAll(dir, 0o777) - if err != nil { - panic(err) - } - dir = addSlash(dir) - } - return dir -} - -// Rewrite string to remove non-standard path characters -func UnicodeSanitize(s string) string { - source := []rune(s) - target := make([]rune, 0, len(source)) - - for _, r := range source { - if unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsMark(r) || - r == '.' || - r == '/' || - r == '\\' || - r == '_' || - r == '-' || - r == '%' || - r == ' ' || - r == '#' { - target = append(target, r) - } - } - - return string(target) -} - -// Transform characters with accents into plain forms. -func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC) - result, _, _ := transform.String(t, string(s)) - - return result -} - -func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { - return FileContainsBytes(a.Fs, filename, subslice) -} - -// Check if a file contains a specified byte slice. -func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslice), nil -} - -func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { - return FileContainsAnyBytes(a.Fs, filename, subslices) -} - -// Check if a file contains any of the specified byte slices. -func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslices...), nil -} - -// readerContains reports whether any of the subslices is within r. 
-func readerContainsAny(r io.Reader, subslices ...[]byte) bool { - if r == nil || len(subslices) == 0 { - return false - } - - largestSlice := 0 - - for _, sl := range subslices { - if len(sl) > largestSlice { - largestSlice = len(sl) - } - } - - if largestSlice == 0 { - return false - } - - bufflen := largestSlice * 4 - halflen := bufflen / 2 - buff := make([]byte, bufflen) - var err error - var n, i int - - for { - i++ - if i == 1 { - n, err = io.ReadAtLeast(r, buff[:halflen], halflen) - } else { - if i != 2 { - // shift left to catch overlapping matches - copy(buff[:], buff[halflen:]) - } - n, err = io.ReadAtLeast(r, buff[halflen:], halflen) - } - - if n > 0 { - for _, sl := range subslices { - if bytes.Contains(buff, sl) { - return true - } - } - } - - if err != nil { - break - } - } - return false -} - -func (a Afero) DirExists(path string) (bool, error) { - return DirExists(a.Fs, path) -} - -// DirExists checks if a path exists and is a directory. -func DirExists(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err == nil && fi.IsDir() { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func (a Afero) IsDir(path string) (bool, error) { - return IsDir(a.Fs, path) -} - -// IsDir checks if a given path is a directory. -func IsDir(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - return fi.IsDir(), nil -} - -func (a Afero) IsEmpty(path string) (bool, error) { - return IsEmpty(a.Fs, path) -} - -// IsEmpty checks if a given file or directory is empty. -func IsEmpty(fs Fs, path string) (bool, error) { - if b, _ := Exists(fs, path); !b { - return false, fmt.Errorf("%q path does not exist", path) - } - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - if fi.IsDir() { - f, err := fs.Open(path) - if err != nil { - return false, err - } - defer f.Close() - list, err := f.Readdir(-1) - if err != nil { - return false, err - } - return len(list) == 0, nil - } - return fi.Size() == 0, nil -} - -func (a Afero) Exists(path string) (bool, error) { - return Exists(a.Fs, path) -} - -// Check if a file or directory exists. 
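A brief usage sketch (illustrative only, not part of the diff) of the existence helpers above, run against an in-memory filesystem:

    package main

    import (
        "fmt"

        "github.com/spf13/afero"
    )

    func main() {
        fs := afero.NewMemMapFs()
        // WriteFile is afero's Fs-aware analogue of ioutil.WriteFile;
        // MemMapFs creates parent directories implicitly.
        if err := afero.WriteFile(fs, "/tmp/demo/file.txt", []byte("hello"), 0o644); err != nil {
            panic(err)
        }

        exists, _ := afero.Exists(fs, "/tmp/demo/file.txt") // true
        isDir, _ := afero.DirExists(fs, "/tmp/demo")        // true
        empty, _ := afero.IsEmpty(fs, "/tmp/demo/file.txt") // false: five bytes

        fmt.Println(exists, isDir, empty) // true true false
    }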
-func Exists(fs Fs, path string) (bool, error) { - _, err := fs.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { - combinedPath := filepath.Join(basePathFs.path, relativePath) - if parent, ok := basePathFs.source.(*BasePathFs); ok { - return FullBaseFsPath(parent, combinedPath) - } - - return combinedPath -} diff --git a/vendor/github.com/spf13/cast/.gitignore b/vendor/github.com/spf13/cast/.gitignore deleted file mode 100644 index 53053a8ac5..0000000000 --- a/vendor/github.com/spf13/cast/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test - -*.bench diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE deleted file mode 100644 index 4527efb9c0..0000000000 --- a/vendor/github.com/spf13/cast/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile deleted file mode 100644 index f01a5dbb6e..0000000000 --- a/vendor/github.com/spf13/cast/Makefile +++ /dev/null @@ -1,40 +0,0 @@ -GOVERSION := $(shell go version | cut -d ' ' -f 3 | cut -d '.' -f 2) - -.PHONY: check fmt lint test test-race vet test-cover-html help -.DEFAULT_GOAL := help - -check: test-race fmt vet lint ## Run tests and linters - -test: ## Run tests - go test ./... - -test-race: ## Run tests with race detector - go test -race ./... - -fmt: ## Run gofmt linter -ifeq "$(GOVERSION)" "12" - @for d in `go list` ; do \ - if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \ - echo "^ improperly formatted go files" && echo && exit 1; \ - fi \ - done -endif - -lint: ## Run golint linter - @for d in `go list` ; do \ - if [ "`golint $$d | tee /dev/stderr`" ]; then \ - echo "^ golint errors!" && echo && exit 1; \ - fi \ - done - -vet: ## Run go vet linter - @if [ "`go vet | tee /dev/stderr`" ]; then \ - echo "^ go vet errors!" 
&& echo && exit 1; \ - fi - -test-cover-html: ## Generate test coverage report - go test -coverprofile=coverage.out -covermode=count - go tool cover -func=coverage.out - -help: - @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md deleted file mode 100644 index 0e9e145935..0000000000 --- a/vendor/github.com/spf13/cast/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# cast - -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/ci.yaml) -[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/cast)](https://pkg.go.dev/mod/github.com/spf13/cast) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast?style=flat-square)](https://goreportcard.com/report/github.com/spf13/cast) - -Easy and safe casting from one type to another in Go - -Don’t Panic! ... Cast - -## What is Cast? - -Cast is a library to convert between different go types in a consistent and easy way. - -Cast provides simple functions to easily convert a number to a string, an -interface into a bool, etc. Cast does this intelligently when an obvious -conversion is possible. It doesn’t make any attempts to guess what you meant, -for example you can only convert a string to an int when it is a string -representation of an int such as “8”. Cast was developed for use in -[Hugo](https://gohugo.io), a website engine which uses YAML, TOML or JSON -for meta data. - -## Why use Cast? - -When working with dynamic data in Go you often need to cast or convert the data -from one type into another. Cast goes beyond just using type assertion (though -it uses that when possible) to provide a very straightforward and convenient -library. - -If you are working with interfaces to handle things like dynamic content -you’ll need an easy way to convert an interface into a given type. This -is the library for you. - -If you are taking in data from YAML, TOML or JSON or other formats which lack -full types, then Cast is the library for you. - -## Usage - -Cast provides a handful of To_____ methods. These methods will always return -the desired type. **If input is provided that will not convert to that type, the -0 or nil value for that type will be returned**. - -Cast also provides identical methods To_____E. These return the same result as -the To_____ methods, plus an additional error which tells you if it successfully -converted. Using these methods you can tell the difference between when the -input matched the zero value or when the conversion failed and the zero value -was returned. - -The following examples are merely a sample of what is available. Please review -the code for a complete set. 
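As a quick sketch of the To_____E contract described above (hedged; the printed error text matches the messages in the vendored caste.go later in this diff):

    package main

    import (
        "fmt"

        "github.com/spf13/cast"
    )

    func main() {
        // ToInt swallows failures and returns the zero value...
        fmt.Println(cast.ToInt("not-a-number")) // 0

        // ...while ToIntE also reports whether the conversion succeeded.
        // (The "int64" in the message is a quirk of the vendored code.)
        if n, err := cast.ToIntE("not-a-number"); err != nil {
            fmt.Println(n, err) // 0 unable to cast "not-a-number" of type string to int64
        }
    }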
- -### Example ‘ToString’: - - cast.ToString("mayonegg") // "mayonegg" - cast.ToString(8) // "8" - cast.ToString(8.31) // "8.31" - cast.ToString([]byte("one time")) // "one time" - cast.ToString(nil) // "" - - var foo interface{} = "one more time" - cast.ToString(foo) // "one more time" - - -### Example ‘ToInt’: - - cast.ToInt(8) // 8 - cast.ToInt(8.31) // 8 - cast.ToInt("8") // 8 - cast.ToInt(true) // 1 - cast.ToInt(false) // 0 - - var eight interface{} = 8 - cast.ToInt(eight) // 8 - cast.ToInt(nil) // 0 diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go deleted file mode 100644 index 0cfe9418de..0000000000 --- a/vendor/github.com/spf13/cast/cast.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Package cast provides easy and safe casting in Go. -package cast - -import "time" - -// ToBool casts an interface to a bool type. -func ToBool(i interface{}) bool { - v, _ := ToBoolE(i) - return v -} - -// ToTime casts an interface to a time.Time type. -func ToTime(i interface{}) time.Time { - v, _ := ToTimeE(i) - return v -} - -func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time { - v, _ := ToTimeInDefaultLocationE(i, location) - return v -} - -// ToDuration casts an interface to a time.Duration type. -func ToDuration(i interface{}) time.Duration { - v, _ := ToDurationE(i) - return v -} - -// ToFloat64 casts an interface to a float64 type. -func ToFloat64(i interface{}) float64 { - v, _ := ToFloat64E(i) - return v -} - -// ToFloat32 casts an interface to a float32 type. -func ToFloat32(i interface{}) float32 { - v, _ := ToFloat32E(i) - return v -} - -// ToInt64 casts an interface to an int64 type. -func ToInt64(i interface{}) int64 { - v, _ := ToInt64E(i) - return v -} - -// ToInt32 casts an interface to an int32 type. -func ToInt32(i interface{}) int32 { - v, _ := ToInt32E(i) - return v -} - -// ToInt16 casts an interface to an int16 type. -func ToInt16(i interface{}) int16 { - v, _ := ToInt16E(i) - return v -} - -// ToInt8 casts an interface to an int8 type. -func ToInt8(i interface{}) int8 { - v, _ := ToInt8E(i) - return v -} - -// ToInt casts an interface to an int type. -func ToInt(i interface{}) int { - v, _ := ToIntE(i) - return v -} - -// ToUint casts an interface to a uint type. -func ToUint(i interface{}) uint { - v, _ := ToUintE(i) - return v -} - -// ToUint64 casts an interface to a uint64 type. -func ToUint64(i interface{}) uint64 { - v, _ := ToUint64E(i) - return v -} - -// ToUint32 casts an interface to a uint32 type. -func ToUint32(i interface{}) uint32 { - v, _ := ToUint32E(i) - return v -} - -// ToUint16 casts an interface to a uint16 type. -func ToUint16(i interface{}) uint16 { - v, _ := ToUint16E(i) - return v -} - -// ToUint8 casts an interface to a uint8 type. -func ToUint8(i interface{}) uint8 { - v, _ := ToUint8E(i) - return v -} - -// ToString casts an interface to a string type. -func ToString(i interface{}) string { - v, _ := ToStringE(i) - return v -} - -// ToStringMapString casts an interface to a map[string]string type. -func ToStringMapString(i interface{}) map[string]string { - v, _ := ToStringMapStringE(i) - return v -} - -// ToStringMapStringSlice casts an interface to a map[string][]string type. 
-func ToStringMapStringSlice(i interface{}) map[string][]string { - v, _ := ToStringMapStringSliceE(i) - return v -} - -// ToStringMapBool casts an interface to a map[string]bool type. -func ToStringMapBool(i interface{}) map[string]bool { - v, _ := ToStringMapBoolE(i) - return v -} - -// ToStringMapInt casts an interface to a map[string]int type. -func ToStringMapInt(i interface{}) map[string]int { - v, _ := ToStringMapIntE(i) - return v -} - -// ToStringMapInt64 casts an interface to a map[string]int64 type. -func ToStringMapInt64(i interface{}) map[string]int64 { - v, _ := ToStringMapInt64E(i) - return v -} - -// ToStringMap casts an interface to a map[string]interface{} type. -func ToStringMap(i interface{}) map[string]interface{} { - v, _ := ToStringMapE(i) - return v -} - -// ToSlice casts an interface to a []interface{} type. -func ToSlice(i interface{}) []interface{} { - v, _ := ToSliceE(i) - return v -} - -// ToBoolSlice casts an interface to a []bool type. -func ToBoolSlice(i interface{}) []bool { - v, _ := ToBoolSliceE(i) - return v -} - -// ToStringSlice casts an interface to a []string type. -func ToStringSlice(i interface{}) []string { - v, _ := ToStringSliceE(i) - return v -} - -// ToIntSlice casts an interface to a []int type. -func ToIntSlice(i interface{}) []int { - v, _ := ToIntSliceE(i) - return v -} - -// ToDurationSlice casts an interface to a []time.Duration type. -func ToDurationSlice(i interface{}) []time.Duration { - v, _ := ToDurationSliceE(i) - return v -} diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go deleted file mode 100644 index d49bbf83ec..0000000000 --- a/vendor/github.com/spf13/cast/caste.go +++ /dev/null @@ -1,1498 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package cast - -import ( - "encoding/json" - "errors" - "fmt" - "html/template" - "reflect" - "strconv" - "strings" - "time" -) - -var errNegativeNotAllowed = errors.New("unable to cast negative value") - -// ToTimeE casts an interface to a time.Time type. -func ToTimeE(i interface{}) (tim time.Time, err error) { - return ToTimeInDefaultLocationE(i, time.UTC) -} - -// ToTimeInDefaultLocationE casts an empty interface to time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. -func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) { - i = indirect(i) - - switch v := i.(type) { - case time.Time: - return v, nil - case string: - return StringToDateInDefaultLocation(v, location) - case json.Number: - s, err1 := ToInt64E(v) - if err1 != nil { - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } - return time.Unix(s, 0), nil - case int: - return time.Unix(int64(v), 0), nil - case int64: - return time.Unix(v, 0), nil - case int32: - return time.Unix(int64(v), 0), nil - case uint: - return time.Unix(int64(v), 0), nil - case uint64: - return time.Unix(int64(v), 0), nil - case uint32: - return time.Unix(int64(v), 0), nil - default: - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } -} - -// ToDurationE casts an interface to a time.Duration type. 
-func ToDurationE(i interface{}) (d time.Duration, err error) { - i = indirect(i) - - switch s := i.(type) { - case time.Duration: - return s, nil - case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: - d = time.Duration(ToInt64(s)) - return - case float32, float64: - d = time.Duration(ToFloat64(s)) - return - case string: - if strings.ContainsAny(s, "nsuµmh") { - d, err = time.ParseDuration(s) - } else { - d, err = time.ParseDuration(s + "ns") - } - return - case json.Number: - var v float64 - v, err = s.Float64() - d = time.Duration(v) - return - default: - err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) - return - } -} - -// ToBoolE casts an interface to a bool type. -func ToBoolE(i interface{}) (bool, error) { - i = indirect(i) - - switch b := i.(type) { - case bool: - return b, nil - case nil: - return false, nil - case int: - return b != 0, nil - case int64: - return b != 0, nil - case int32: - return b != 0, nil - case int16: - return b != 0, nil - case int8: - return b != 0, nil - case uint: - return b != 0, nil - case uint64: - return b != 0, nil - case uint32: - return b != 0, nil - case uint16: - return b != 0, nil - case uint8: - return b != 0, nil - case float64: - return b != 0, nil - case float32: - return b != 0, nil - case time.Duration: - return b != 0, nil - case string: - return strconv.ParseBool(i.(string)) - case json.Number: - v, err := ToInt64E(b) - if err == nil { - return v != 0, nil - } - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - default: - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - } -} - -// ToFloat64E casts an interface to a float64 type. -func ToFloat64E(i interface{}) (float64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float64(intv), nil - } - - switch s := i.(type) { - case float64: - return s, nil - case float32: - return float64(s), nil - case int64: - return float64(s), nil - case int32: - return float64(s), nil - case int16: - return float64(s), nil - case int8: - return float64(s), nil - case uint: - return float64(s), nil - case uint64: - return float64(s), nil - case uint32: - return float64(s), nil - case uint16: - return float64(s), nil - case uint8: - return float64(s), nil - case string: - v, err := strconv.ParseFloat(s, 64) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case json.Number: - v, err := s.Float64() - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - } -} - -// ToFloat32E casts an interface to a float32 type. 
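The string branch of ToDurationE above has a subtle fallback: inputs without a recognized unit rune are suffixed with "ns" before time.ParseDuration. A small illustrative sketch (not part of the diff):

    package main

    import (
        "fmt"

        "github.com/spf13/cast"
    )

    func main() {
        // Strings carrying a unit go through time.ParseDuration as-is...
        fmt.Println(cast.ToDuration("1500ms")) // 1.5s

        // ...while bare numbers are treated as nanoseconds, per the
        // "ns" fallback in ToDurationE above.
        fmt.Println(cast.ToDuration("42")) // 42ns
    }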
-func ToFloat32E(i interface{}) (float32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float32(intv), nil - } - - switch s := i.(type) { - case float64: - return float32(s), nil - case float32: - return s, nil - case int64: - return float32(s), nil - case int32: - return float32(s), nil - case int16: - return float32(s), nil - case int8: - return float32(s), nil - case uint: - return float32(s), nil - case uint64: - return float32(s), nil - case uint32: - return float32(s), nil - case uint16: - return float32(s), nil - case uint8: - return float32(s), nil - case string: - v, err := strconv.ParseFloat(s, 32) - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case json.Number: - v, err := s.Float64() - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - } -} - -// ToInt64E casts an interface to an int64 type. -func ToInt64E(i interface{}) (int64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int64(intv), nil - } - - switch s := i.(type) { - case int64: - return s, nil - case int32: - return int64(s), nil - case int16: - return int64(s), nil - case int8: - return int64(s), nil - case uint: - return int64(s), nil - case uint64: - return int64(s), nil - case uint32: - return int64(s), nil - case uint16: - return int64(s), nil - case uint8: - return int64(s), nil - case float64: - return int64(s), nil - case float32: - return int64(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToInt64E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - } -} - -// ToInt32E casts an interface to an int32 type. -func ToInt32E(i interface{}) (int32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int32(intv), nil - } - - switch s := i.(type) { - case int64: - return int32(s), nil - case int32: - return s, nil - case int16: - return int32(s), nil - case int8: - return int32(s), nil - case uint: - return int32(s), nil - case uint64: - return int32(s), nil - case uint32: - return int32(s), nil - case uint16: - return int32(s), nil - case uint8: - return int32(s), nil - case float64: - return int32(s), nil - case float32: - return int32(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - case json.Number: - return ToInt32E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - } -} - -// ToInt16E casts an interface to an int16 type. 
-func ToInt16E(i interface{}) (int16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int16(intv), nil - } - - switch s := i.(type) { - case int64: - return int16(s), nil - case int32: - return int16(s), nil - case int16: - return s, nil - case int8: - return int16(s), nil - case uint: - return int16(s), nil - case uint64: - return int16(s), nil - case uint32: - return int16(s), nil - case uint16: - return int16(s), nil - case uint8: - return int16(s), nil - case float64: - return int16(s), nil - case float32: - return int16(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - case json.Number: - return ToInt16E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - } -} - -// ToInt8E casts an interface to an int8 type. -func ToInt8E(i interface{}) (int8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int8(intv), nil - } - - switch s := i.(type) { - case int64: - return int8(s), nil - case int32: - return int8(s), nil - case int16: - return int8(s), nil - case int8: - return s, nil - case uint: - return int8(s), nil - case uint64: - return int8(s), nil - case uint32: - return int8(s), nil - case uint16: - return int8(s), nil - case uint8: - return int8(s), nil - case float64: - return int8(s), nil - case float32: - return int8(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - case json.Number: - return ToInt8E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - } -} - -// ToIntE casts an interface to an int type. -func ToIntE(i interface{}) (int, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return intv, nil - } - - switch s := i.(type) { - case int64: - return int(s), nil - case int32: - return int(s), nil - case int16: - return int(s), nil - case int8: - return int(s), nil - case uint: - return int(s), nil - case uint64: - return int(s), nil - case uint32: - return int(s), nil - case uint16: - return int(s), nil - case uint8: - return int(s), nil - case float64: - return int(s), nil - case float32: - return int(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToIntE(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) - } -} - -// ToUintE casts an interface to a uint type. 
-func ToUintE(i interface{}) (uint, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - case json.Number: - return ToUintE(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case uint: - return s, nil - case uint64: - return uint(s), nil - case uint32: - return uint(s), nil - case uint16: - return uint(s), nil - case uint8: - return uint(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - } -} - -// ToUint64E casts an interface to a uint64 type. -func ToUint64E(i interface{}) (uint64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint64(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint64(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - case json.Number: - return ToUint64E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case uint: - return uint64(s), nil - case uint64: - return s, nil - case uint32: - return uint64(s), nil - case uint16: - return uint64(s), nil - case uint8: - return uint64(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - } -} - -// ToUint32E casts an interface to a uint32 type. 
-func ToUint32E(i interface{}) (uint32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint32(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - case json.Number: - return ToUint32E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case uint: - return uint32(s), nil - case uint64: - return uint32(s), nil - case uint32: - return s, nil - case uint16: - return uint32(s), nil - case uint8: - return uint32(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - } -} - -// ToUint16E casts an interface to a uint16 type. -func ToUint16E(i interface{}) (uint16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint16(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - case json.Number: - return ToUint16E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case uint: - return uint16(s), nil - case uint64: - return uint16(s), nil - case uint32: - return uint16(s), nil - case uint16: - return s, nil - case uint8: - return uint16(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - } -} - -// ToUint8E casts an interface to a uint type. 
-func ToUint8E(i interface{}) (uint8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint8(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - case json.Number: - return ToUint8E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case uint: - return uint8(s), nil - case uint64: - return uint8(s), nil - case uint32: - return uint8(s), nil - case uint16: - return uint8(s), nil - case uint8: - return s, nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - } -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirect returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil). -func indirect(a interface{}) interface{} { - if a == nil { - return nil - } - if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { - // Avoid creating a reflect.Value if it's not a pointer. - return a - } - v := reflect.ValueOf(a) - for v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirectToStringerOrError returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer -// or error, -func indirectToStringerOrError(a interface{}) interface{} { - if a == nil { - return nil - } - - var errorType = reflect.TypeOf((*error)(nil)).Elem() - var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - - v := reflect.ValueOf(a) - for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// ToStringE casts an interface to a string type. 
-func ToStringE(i interface{}) (string, error) { - i = indirectToStringerOrError(i) - - switch s := i.(type) { - case string: - return s, nil - case bool: - return strconv.FormatBool(s), nil - case float64: - return strconv.FormatFloat(s, 'f', -1, 64), nil - case float32: - return strconv.FormatFloat(float64(s), 'f', -1, 32), nil - case int: - return strconv.Itoa(s), nil - case int64: - return strconv.FormatInt(s, 10), nil - case int32: - return strconv.Itoa(int(s)), nil - case int16: - return strconv.FormatInt(int64(s), 10), nil - case int8: - return strconv.FormatInt(int64(s), 10), nil - case uint: - return strconv.FormatUint(uint64(s), 10), nil - case uint64: - return strconv.FormatUint(uint64(s), 10), nil - case uint32: - return strconv.FormatUint(uint64(s), 10), nil - case uint16: - return strconv.FormatUint(uint64(s), 10), nil - case uint8: - return strconv.FormatUint(uint64(s), 10), nil - case json.Number: - return s.String(), nil - case []byte: - return string(s), nil - case template.HTML: - return string(s), nil - case template.URL: - return string(s), nil - case template.JS: - return string(s), nil - case template.CSS: - return string(s), nil - case template.HTMLAttr: - return string(s), nil - case nil: - return "", nil - case fmt.Stringer: - return s.String(), nil - case error: - return s.Error(), nil - default: - return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) - } -} - -// ToStringMapStringE casts an interface to a map[string]string type. -func ToStringMapStringE(i interface{}) (map[string]string, error) { - var m = map[string]string{} - - switch v := i.(type) { - case map[string]string: - return v, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) - } -} - -// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
-func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - var m = map[string][]string{} - - switch v := i.(type) { - case map[string][]string: - return v, nil - case map[string][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[string]string: - for k, val := range v { - m[ToString(k)] = []string{val} - } - case map[string]interface{}: - for k, val := range v { - switch vt := val.(type) { - case []interface{}: - m[ToString(k)] = ToStringSlice(vt) - case []string: - m[ToString(k)] = vt - default: - m[ToString(k)] = []string{ToString(val)} - } - } - return m, nil - case map[interface{}][]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - key, err := ToStringE(k) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - value, err := ToStringSliceE(val) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - m[key] = value - } - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - return m, nil -} - -// ToStringMapBoolE casts an interface to a map[string]bool type. -func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - var m = map[string]bool{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]bool: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i) - } -} - -// ToStringMapE casts an interface to a map[string]interface{} type. -func ToStringMapE(i interface{}) (map[string]interface{}, error) { - var m = map[string]interface{}{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = val - } - return m, nil - case map[string]interface{}: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i) - } -} - -// ToStringMapIntE casts an interface to a map[string]int{} type. 
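One behavior worth noting from the map casters above: a plain JSON string input is decoded via jsonStringToObject (defined near the end of this file). An illustrative sketch (not part of the diff; note that JSON numbers land as float64 in the interface{} map):

    package main

    import (
        "fmt"

        "github.com/spf13/cast"
    )

    func main() {
        m := cast.ToStringMap(`{"name": "ko", "replicas": 3}`)
        fmt.Println(m["name"], m["replicas"]) // ko 3

        b := cast.ToStringMapBool(`{"enabled": true}`)
        fmt.Println(b["enabled"]) // true
    }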
-func ToStringMapIntE(i interface{}) (map[string]int, error) { - var m = map[string]int{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt(val) - } - return m, nil - case map[string]int: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToIntE(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToStringMapInt64E casts an interface to a map[string]int64{} type. -func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - var m = map[string]int64{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt64(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt64(val) - } - return m, nil - case map[string]int64: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToInt64E(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToSliceE casts an interface to a []interface{} type. -func ToSliceE(i interface{}) ([]interface{}, error) { - var s []interface{} - - switch v := i.(type) { - case []interface{}: - return append(s, v...), nil - case []map[string]interface{}: - for _, u := range v { - s = append(s, u) - } - return s, nil - default: - return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) - } -} - -// ToBoolSliceE casts an interface to a []bool type. -func ToBoolSliceE(i interface{}) ([]bool, error) { - if i == nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - - switch v := i.(type) { - case []bool: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]bool, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToBoolE(s.Index(j).Interface()) - if err != nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - a[j] = val - } - return a, nil - default: - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } -} - -// ToStringSliceE casts an interface to a []string type. 
-func ToStringSliceE(i interface{}) ([]string, error) { - var a []string - - switch v := i.(type) { - case []interface{}: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []string: - return v, nil - case []int8: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case string: - return strings.Fields(v), nil - case []error: - for _, err := range i.([]error) { - a = append(a, err.Error()) - } - return a, nil - case interface{}: - str, err := ToStringE(v) - if err != nil { - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } - return []string{str}, nil - default: - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } -} - -// ToIntSliceE casts an interface to a []int type. -func ToIntSliceE(i interface{}) ([]int, error) { - if i == nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - - switch v := i.(type) { - case []int: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]int, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToIntE(s.Index(j).Interface()) - if err != nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - a[j] = val - } - return a, nil - default: - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } -} - -// ToDurationSliceE casts an interface to a []time.Duration type. -func ToDurationSliceE(i interface{}) ([]time.Duration, error) { - if i == nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - - switch v := i.(type) { - case []time.Duration: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]time.Duration, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToDurationE(s.Index(j).Interface()) - if err != nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - a[j] = val - } - return a, nil - default: - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } -} - -// StringToDate attempts to parse a string into a time.Time type using a -// predefined list of formats. If no suitable format is found, an error is -// returned. -func StringToDate(s string) (time.Time, error) { - return parseDateWith(s, time.UTC, timeFormats) -} - -// StringToDateInDefaultLocation casts an empty interface to a time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. 
-func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { - return parseDateWith(s, location, timeFormats) -} - -type timeFormatType int - -const ( - timeFormatNoTimezone timeFormatType = iota - timeFormatNamedTimezone - timeFormatNumericTimezone - timeFormatNumericAndNamedTimezone - timeFormatTimeOnly -) - -type timeFormat struct { - format string - typ timeFormatType -} - -func (f timeFormat) hasTimezone() bool { - // We don't include the formats with only named timezones, see - // https://github.com/golang/go/issues/19694#issuecomment-289103522 - return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone -} - -var ( - timeFormats = []timeFormat{ - // Keep common formats at the top. - {"2006-01-02", timeFormatNoTimezone}, - {time.RFC3339, timeFormatNumericTimezone}, - {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - {time.RFC1123Z, timeFormatNumericTimezone}, - {time.RFC1123, timeFormatNamedTimezone}, - {time.RFC822Z, timeFormatNumericTimezone}, - {time.RFC822, timeFormatNamedTimezone}, - {time.RFC850, timeFormatNamedTimezone}, - {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - {"2006-01-02 15:04:05", timeFormatNoTimezone}, - {time.ANSIC, timeFormatNoTimezone}, - {time.UnixDate, timeFormatNamedTimezone}, - {time.RubyDate, timeFormatNumericTimezone}, - {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"02 Jan 2006", timeFormatNoTimezone}, - {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - {time.Kitchen, timeFormatTimeOnly}, - {time.Stamp, timeFormatTimeOnly}, - {time.StampMilli, timeFormatTimeOnly}, - {time.StampMicro, timeFormatTimeOnly}, - {time.StampNano, timeFormatTimeOnly}, - } -) - -func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - - for _, format := range formats { - if d, e = time.Parse(format.format, s); e == nil { - - // Some time formats have a zone name, but no offset, so it gets - // put in that zone name (not the default one passed in to us), but - // without that zone's offset. So set the location manually. - if format.typ <= timeFormatNamedTimezone { - if location == nil { - location = time.Local - } - year, month, day := d.Date() - hour, min, sec := d.Clock() - d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) - } - - return - } - } - return d, fmt.Errorf("unable to parse date: %s", s) -} - -// jsonStringToObject attempts to unmarshall a string as JSON into -// the object passed as pointer. -func jsonStringToObject(s string, v interface{}) error { - data := []byte(s) - return json.Unmarshal(data, v) -} - -// toInt returns the int value of v if v or v's underlying type -// is an int. -// Note that this will return false for int64 etc. types. 
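To make the parseDateWith flow above concrete, a small sketch (illustrative, not part of the diff; assumes tzdata is available for LoadLocation):

    package main

    import (
        "fmt"
        "time"

        "github.com/spf13/cast"
    )

    func main() {
        // StringToDate tries the timeFormats table above in order; the first
        // format that parses wins, with zone-free inputs pinned to UTC.
        d, err := cast.StringToDate("2006-01-02")
        fmt.Println(d, err) // 2006-01-02 00:00:00 +0000 UTC <nil>

        // Zone-free inputs can instead be pinned to a chosen location,
        // via the manual re-dating step in parseDateWith.
        nyc, _ := time.LoadLocation("America/New_York")
        d2, _ := cast.StringToDateInDefaultLocation("2006-01-02 15:04:05", nyc)
        fmt.Println(d2) // 2006-01-02 15:04:05 -0500 EST
    }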
-func toInt(v interface{}) (int, bool) { - switch v := v.(type) { - case int: - return v, true - case time.Weekday: - return int(v), true - case time.Month: - return int(v), true - default: - return 0, false - } -} - -func trimZeroDecimal(s string) string { - var foundZero bool - for i := len(s); i > 0; i-- { - switch s[i-1] { - case '.': - if foundZero { - return s[:i-1] - } - case '0': - foundZero = true - default: - return s - } - } - return s -} diff --git a/vendor/github.com/spf13/cast/timeformattype_string.go b/vendor/github.com/spf13/cast/timeformattype_string.go deleted file mode 100644 index 1524fc82ce..0000000000 --- a/vendor/github.com/spf13/cast/timeformattype_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type timeFormatType"; DO NOT EDIT. - -package cast - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[timeFormatNoTimezone-0] - _ = x[timeFormatNamedTimezone-1] - _ = x[timeFormatNumericTimezone-2] - _ = x[timeFormatNumericAndNamedTimezone-3] - _ = x[timeFormatTimeOnly-4] -} - -const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" - -var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} - -func (i timeFormatType) String() string { - if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { - return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] -} diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore deleted file mode 100644 index c7b459e4dd..0000000000 --- a/vendor/github.com/spf13/cobra/.gitignore +++ /dev/null @@ -1,39 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore -# swap -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -# session -Session.vim -# temporary -.netrwhist -*~ -# auto-generated tag files -tags - -*.exe -cobra.test -bin - -.idea/ -*.iml diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml deleted file mode 100644 index a618ec24d8..0000000000 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2013-2023 The Cobra Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -run: - deadline: 5m - -linters: - disable-all: true - enable: - #- bodyclose - # - deadcode ! 
deprecated since v1.49.0; replaced by 'unused' - #- depguard - #- dogsled - #- dupl - - errcheck - #- exhaustive - #- funlen - - gas - #- gochecknoinits - - goconst - #- gocritic - #- gocyclo - #- gofmt - - goimports - - golint - #- gomnd - #- goprintffuncname - #- gosec - #- gosimple - - govet - - ineffassign - - interfacer - #- lll - - maligned - - megacheck - #- misspell - #- nakedret - #- noctx - #- nolintlint - #- rowserrcheck - #- scopelint - #- staticcheck - #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - #- stylecheck - #- typecheck - - unconvert - #- unparam - - unused - # - varcheck ! deprecated since v1.49.0; replaced by 'unused' - #- whitespace - fast: false diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap deleted file mode 100644 index 94ec53068a..0000000000 --- a/vendor/github.com/spf13/cobra/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -Steve Francia -Bjørn Erik Pedersen -Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md deleted file mode 100644 index 9d16f88fd1..0000000000 --- a/vendor/github.com/spf13/cobra/CONDUCT.md +++ /dev/null @@ -1,37 +0,0 @@ -## Cobra User Contract - -### Versioning -Cobra will follow a steady release cadence. Non-breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release. - -### Backward Compatibility -We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released. - -### Deprecation -Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the chance of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an issue on GitHub. - -### CVE -Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed with which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one. - -### Communication -Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors. - -### Breaking Changes -Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra. - -There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version. - -Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These versions have consumers who expect the APIs, behaviors, etc., to remain stable during the lifetime of the patch stream for the minor release. 
- -Examples of breaking changes include: -- Removing or renaming an exported constant, variable, type, or function. -- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper`, etc. - - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing. - -There may, at times, be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging. - -### CI Testing -Maintainers will ensure the Cobra test suite utilizes the currently supported versions of Go. - -### Disclaimer -Changes to this document and the contents therein are at the discretion of the maintainers. -None of the contents of this document are legally binding in any way to the maintainers or the users. diff --git a/vendor/github.com/spf13/cobra/CONTRIBUTING.md b/vendor/github.com/spf13/cobra/CONTRIBUTING.md deleted file mode 100644 index 6f356e6a82..0000000000 --- a/vendor/github.com/spf13/cobra/CONTRIBUTING.md +++ /dev/null @@ -1,50 +0,0 @@ -# Contributing to Cobra - -Thank you so much for contributing to Cobra. We appreciate your time and help. -Here are some guidelines to help you get started. - -## Code of Conduct - -Be kind and respectful to the members of the community. Take time to educate -others who are seeking help. Harassment of any kind will not be tolerated. - -## Questions - -If you have questions regarding Cobra, feel free to ask them in the community -[#cobra Slack channel][cobra-slack] - -## Filing a bug or feature - -1. Before filing an issue, please check the existing issues to see if a - similar one was already opened. If there is one already opened, feel free - to comment on it. -1. If you believe you've found a bug, please provide detailed steps of - reproduction, the version of Cobra, and anything else you believe will be - useful to help troubleshoot it (e.g. OS environment, environment variables, - etc.). Also state the current behavior vs. the expected behavior. -1. If you'd like to see a feature or an enhancement, please open an issue with - a clear title and description of what the feature is and why it would be - beneficial to the project and its users. - -## Submitting changes - -1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to - sign a CLA. Please sign the CLA :slightly_smiling_face: -1. Tests: If you are submitting code, please ensure you have adequate tests - for the feature. Tests can be run via `go test ./...` or `make test`. -1. Since this is a Go project, ensure the new code is properly formatted to - ensure code consistency. Run `make all`. - -### Quick steps to contribute - -1. Fork the project. -1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) -1. Create your feature branch (`git checkout -b my-new-feature`) -1. Make changes and run tests (`make test`) -1. Add them to staging (`git add .`) -1. Commit your changes (`git commit -m 'Add some feature'`) -1. Push to the branch (`git push origin my-new-feature`) -1. 
Create new pull request - - -[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199 diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt deleted file mode 100644 index 298f0e2665..0000000000 --- a/vendor/github.com/spf13/cobra/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/spf13/cobra/MAINTAINERS b/vendor/github.com/spf13/cobra/MAINTAINERS deleted file mode 100644 index 4c5ac3dd99..0000000000 --- a/vendor/github.com/spf13/cobra/MAINTAINERS +++ /dev/null @@ -1,13 +0,0 @@ -maintainers: -- spf13 -- johnSchnake -- jpmcb -- marckhouzam -inactive: -- anthonyfok -- bep -- bogem -- broady -- eparis -- jharshman -- wfernandes diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile deleted file mode 100644 index 0da8d7aa08..0000000000 --- a/vendor/github.com/spf13/cobra/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -BIN="./bin" -SRC=$(shell find . -name "*.go") - -ifeq (, $(shell which golangci-lint)) -$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") -endif - -.PHONY: fmt lint test install_deps clean - -default: all - -all: fmt test - -fmt: - $(info ******************** checking formatting ********************) - @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1) - -lint: - $(info ******************** running lint tools ********************) - golangci-lint run -v - -test: install_deps - $(info ******************** running tests ********************) - go test -v ./... - -richtest: install_deps - $(info ******************** running tests with kyoh86/richgo ********************) - richgo test -v ./... - -install_deps: - $(info ******************** downloading dependencies ********************) - go get -v ./... - -clean: - rm -rf $(BIN) diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md deleted file mode 100644 index 6444f4b7f6..0000000000 --- a/vendor/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,112 +0,0 @@ -![cobra logo](assets/CobraMain.png) - -Cobra is a library for creating powerful modern CLI applications. - -Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), -[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to -name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra. - -[![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) -[![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) -[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) - -# Overview - -Cobra is a library providing a simple interface to create powerful modern CLI -interfaces similar to git & go tools. - -Cobra provides: -* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. -* Fully POSIX-compliant flags (including short & long versions) -* Nested subcommands -* Global, local and cascading flags -* Intelligent suggestions (`app srver`... did you mean `app server`?) -* Automatic help generation for commands and flags -* Grouping help for subcommands -* Automatic help flag recognition of `-h`, `--help`, etc. 
-* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell) -* Automatically generated man pages for your application -* Command aliases so you can change things without breaking them -* The flexibility to define your own help, usage, etc. -* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps - -# Concepts - -Cobra is built on a structure of commands, arguments & flags. - -**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. - -The best applications read like sentences when used, and as a result, users -intuitively know how to interact with them. - -The pattern to follow is -`APPNAME VERB NOUN --ADJECTIVE` - or -`APPNAME COMMAND ARG --FLAG`. - -A few good real world examples may better illustrate this point. - -In the following example, 'server' is a command, and 'port' is a flag: - - hugo server --port=1313 - -In this command we are telling Git to clone the url bare. - - git clone URL --bare - -## Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have children commands and optionally run an action. - -In the example above, 'server' is the command. - -[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command) - -## Flags - -A flag is a way to modify the behavior of a command. Cobra supports -fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above, 'port' is the flag. - -Flag functionality is provided by the [pflag -library](https://github.com/spf13/pflag), a fork of the flag standard library -which maintains the same interface while adding POSIX compliance. - -# Installing -Using Cobra is easy. First, use `go get` to install the latest version -of the library. - -``` -go get -u github.com/spf13/cobra@latest -``` - -Next, include Cobra in your application: - -```go -import "github.com/spf13/cobra" -``` - -# Usage -`cobra-cli` is a command line program to generate cobra applications and command files. -It will bootstrap your application scaffolding to rapidly -develop a Cobra-based application. It is the easiest way to incorporate Cobra into your application. - -It can be installed by running: - -``` -go install github.com/spf13/cobra-cli@latest -``` - -For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) - -For complete details on using the Cobra library, please read the [The Cobra User Guide](site/content/user_guide.md). - -# License - -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](LICENSE.txt) diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go deleted file mode 100644 index 5f965e057f..0000000000 --- a/vendor/github.com/spf13/cobra/active_help.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cobra - -import ( - "fmt" - "os" - "regexp" - "strings" -) - -const ( - activeHelpMarker = "_activeHelp_ " - // The below values should not be changed: programs will be using them explicitly - // in their user documentation, and users will be using them explicitly. - activeHelpEnvVarSuffix = "_ACTIVE_HELP" - activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP" - activeHelpGlobalDisable = "0" -) - -var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) - -// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. -// Such strings will be processed by the completion script and will be shown as ActiveHelp -// to the user. -// The array parameter should be the array that will contain the completions. -// This function can be called multiple times before and/or after completions are added to -// the array. Each time this function is called with the same array, the new -// ActiveHelp line will be shown below the previous ones when completion is triggered. -func AppendActiveHelp(compArray []string, activeHelpStr string) []string { - return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr)) -} - -// GetActiveHelpConfig returns the value of the ActiveHelp environment variable -// _ACTIVE_HELP where is the name of the root command in upper -// case, with all non-ASCII-alphanumeric characters replaced by `_`. -// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP -// is set to "0". -func GetActiveHelpConfig(cmd *Command) string { - activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar) - if activeHelpCfg != activeHelpGlobalDisable { - activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name())) - } - return activeHelpCfg -} - -// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment -// variable. It has the format _ACTIVE_HELP where is the name of the -// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. -func activeHelpEnvVar(name string) string { - // This format should not be changed: users will be using it explicitly. - activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_") - return activeHelpEnvVar -} diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go deleted file mode 100644 index e79ec33a81..0000000000 --- a/vendor/github.com/spf13/cobra/args.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package cobra - -import ( - "fmt" - "strings" -) - -type PositionalArgs func(cmd *Command, args []string) error - -// legacyArgs validation has the following behaviour: -// - root commands with no subcommands can take arbitrary arguments -// - root commands with subcommands will do subcommand validity checking -// - subcommands will always accept arbitrary arguments -func legacyArgs(cmd *Command, args []string) error { - // no subcommand, always take args - if !cmd.HasSubCommands() { - return nil - } - - // root command with subcommands, do subcommand checking. - if !cmd.HasParent() && len(args) > 0 { - return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0])) - } - return nil -} - -// NoArgs returns an error if any args are included. -func NoArgs(cmd *Command, args []string) error { - if len(args) > 0 { - return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath()) - } - return nil -} - -// OnlyValidArgs returns an error if there are any positional args that are not in -// the `ValidArgs` field of `Command` -func OnlyValidArgs(cmd *Command, args []string) error { - if len(cmd.ValidArgs) > 0 { - // Remove any description that may be included in ValidArgs. - // A description is following a tab character. - var validArgs []string - for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) - } - for _, v := range args { - if !stringInSlice(v, validArgs) { - return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) - } - } - } - return nil -} - -// ArbitraryArgs never returns an error. -func ArbitraryArgs(cmd *Command, args []string) error { - return nil -} - -// MinimumNArgs returns an error if there is not at least N args. -func MinimumNArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) < n { - return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args)) - } - return nil - } -} - -// MaximumNArgs returns an error if there are more than N args. -func MaximumNArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) > n { - return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args)) - } - return nil - } -} - -// ExactArgs returns an error if there are not exactly n args. -func ExactArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) != n { - return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) - } - return nil - } -} - -// RangeArgs returns an error if the number of args is not within the expected range. -func RangeArgs(min int, max int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) < min || len(args) > max { - return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args)) - } - return nil - } -} - -// MatchAll allows combining several PositionalArgs to work in concert. 
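A brief illustration before MatchAll's definition below: this is how the exported validators defined in this file combine on a command in practice. A minimal sketch — the `greet` command and its ValidArgs are hypothetical; `MatchAll`, `ExactArgs`, and `OnlyValidArgs` are the helpers shown in this file:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical command: exactly one positional arg, drawn from ValidArgs —
	// the combination the deprecated ExactValidArgs used to provide.
	cmd := &cobra.Command{
		Use:       "greet NAME",
		ValidArgs: []string{"alice", "bob"},
		Args:      cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("hello,", args[0])
		},
	}
	cmd.SetArgs([]string{"carol"}) // fails OnlyValidArgs
	if err := cmd.Execute(); err != nil {
		fmt.Println("error:", err)
	}
}
```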
-func MatchAll(pargs ...PositionalArgs) PositionalArgs { - return func(cmd *Command, args []string) error { - for _, parg := range pargs { - if err := parg(cmd, args); err != nil { - return err - } - } - return nil - } -} - -// ExactValidArgs returns an error if there are not exactly N positional args OR -// there are any positional args that are not in the `ValidArgs` field of `Command` -// -// Deprecated: use MatchAll(ExactArgs(n), OnlyValidArgs) instead -func ExactValidArgs(n int) PositionalArgs { - return MatchAll(ExactArgs(n), OnlyValidArgs) -} diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go deleted file mode 100644 index 8a53151840..0000000000 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ /dev/null @@ -1,712 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "sort" - "strings" - - "github.com/spf13/pflag" -) - -// Annotations for Bash completion. -const ( - BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions" - BashCompCustom = "cobra_annotation_bash_completion_custom" - BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" - BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" -) - -func writePreamble(buf io.StringWriter, name string) { - WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) - WriteStringAndCheck(buf, fmt.Sprintf(` -__%[1]s_debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Homebrew on Macs have version 1.3 of bash-completion which doesn't include -# _init_completion. This is a very minimal version of that function. -__%[1]s_init_completion() -{ - COMPREPLY=() - _get_comp_words_by_ref "$@" cur prev words cword -} - -__%[1]s_index_of_word() -{ - local w word=$1 - shift - index=0 - for w in "$@"; do - [[ $w = "$word" ]] && return - index=$((index+1)) - done - index=-1 -} - -__%[1]s_contains_word() -{ - local w word=$1; shift - for w in "$@"; do - [[ $w = "$word" ]] && return - done - return 1 -} - -__%[1]s_handle_go_custom_completion() -{ - __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}" - - local shellCompDirectiveError=%[3]d - local shellCompDirectiveNoSpace=%[4]d - local shellCompDirectiveNoFileComp=%[5]d - local shellCompDirectiveFilterFileExt=%[6]d - local shellCompDirectiveFilterDirs=%[7]d - - local out requestComp lastParam lastChar comp directive args - - # Prepare the command to request completions for the program. 
- # Calling ${words[0]} instead of directly %[1]s allows handling aliases - args=("${words[@]:1}") - # Disable ActiveHelp which is not supported for bash completion v1 - requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}" - - lastParam=${words[$((${#words[@]}-1))]} - lastChar=${lastParam:$((${#lastParam}-1)):1} - __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}" - - if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go method. - __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter" - requestComp="${requestComp} \"\"" - fi - - __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}" - # Use eval to handle any environment variables and such - out=$(eval "${requestComp}" 2>/dev/null) - - # Extract the directive integer at the very end of the output following a colon (:) - directive=${out##*:} - # Remove the directive - out=${out%%:*} - if [ "${directive}" = "${out}" ]; then - # There is not directive specified - directive=0 - fi - __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}" - __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}" - - if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then - # Error code. No completion. - __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code" - return - else - if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then - __%[1]s_debug "${FUNCNAME[0]}: activating no space" - compopt -o nospace - fi - fi - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then - __%[1]s_debug "${FUNCNAME[0]}: activating no file completion" - compopt +o default - fi - fi - fi - - if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then - # File extension filtering - local fullFilter filter filteringCmd - # Do not use quotes around the $out variable or else newline - # characters will be kept. - for filter in ${out}; do - fullFilter+="$filter|" - done - - filteringCmd="_filedir $fullFilter" - __%[1]s_debug "File filtering command: $filteringCmd" - $filteringCmd - elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then - # File completion for directories only - local subdir - # Use printf to strip any trailing newline - subdir=$(printf "%%s" "${out}") - if [ -n "$subdir" ]; then - __%[1]s_debug "Listing directories in $subdir" - __%[1]s_handle_subdirs_in_dir_flag "$subdir" - else - __%[1]s_debug "Listing directories in ." 
- _filedir -d - fi - else - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${out}" -- "$cur") - fi -} - -__%[1]s_handle_reply() -{ - __%[1]s_debug "${FUNCNAME[0]}" - local comp - case $cur in - -*) - if [[ $(type -t compopt) = "builtin" ]]; then - compopt -o nospace - fi - local allflags - if [ ${#must_have_one_flag[@]} -ne 0 ]; then - allflags=("${must_have_one_flag[@]}") - else - allflags=("${flags[*]} ${two_word_flags[*]}") - fi - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${allflags[*]}" -- "$cur") - if [[ $(type -t compopt) = "builtin" ]]; then - [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace - fi - - # complete after --flag=abc - if [[ $cur == *=* ]]; then - if [[ $(type -t compopt) = "builtin" ]]; then - compopt +o nospace - fi - - local index flag - flag="${cur%%=*}" - __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}" - COMPREPLY=() - if [[ ${index} -ge 0 ]]; then - PREFIX="" - cur="${cur#*=}" - ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION:-}" ]; then - # zsh completion needs --flag= prefix - eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" - fi - fi - fi - - if [[ -z "${flag_parsing_disabled}" ]]; then - # If flag parsing is enabled, we have completed the flags and can return. - # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough - # to possibly call handle_go_custom_completion. - return 0; - fi - ;; - esac - - # check if we are handling a flag with special work handling - local index - __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - ${flags_completion[${index}]} - return - fi - - # we are parsing a flag and don't have a special handler, no completion - if [[ ${cur} != "${words[cword]}" ]]; then - return - fi - - local completions - completions=("${commands[@]}") - if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions+=("${must_have_one_noun[@]}") - elif [[ -n "${has_completion_function}" ]]; then - # if a go completion function is provided, defer to that function - __%[1]s_handle_go_custom_completion - fi - if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then - completions+=("${must_have_one_flag[@]}") - fi - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${completions[*]}" -- "$cur") - - if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${noun_aliases[*]}" -- "$cur") - fi - - if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - if declare -F __%[1]s_custom_func >/dev/null; then - # try command name qualified custom func - __%[1]s_custom_func - else - # otherwise fall back to unqualified for compatibility - declare -F __custom_func >/dev/null && __custom_func - fi - fi - - # available in bash-completion >= 2, not always present on macOS - if declare -F __ltrim_colon_completions >/dev/null; then - __ltrim_colon_completions "$cur" - fi - - # If there is only 1 completion and it is a flag with an = it will be completed - # but we don't want a space after the = - if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then - compopt -o nospace - fi -} - -# The arguments should be in the form "ext1|ext2|extn" -__%[1]s_handle_filename_extension_flag() -{ - local ext="$1" - _filedir "@(${ext})" -} - -__%[1]s_handle_subdirs_in_dir_flag() -{ - local dir="$1" - pushd "${dir}" >/dev/null 2>&1 && _filedir -d && 
popd >/dev/null 2>&1 || return -} - -__%[1]s_handle_flag() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - # if a command required a flag, and we found it, unset must_have_one_flag() - local flagname=${words[c]} - local flagvalue="" - # if the word contained an = - if [[ ${words[c]} == *"="* ]]; then - flagvalue=${flagname#*=} # take in as flagvalue after the = - flagname=${flagname%%=*} # strip everything after the = - flagname="${flagname}=" # but put the = back - fi - __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}" - if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then - must_have_one_flag=() - fi - - # if you set a flag which only applies to this command, don't show subcommands - if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then - commands=() - fi - - # keep flag value with flagname as flaghash - # flaghash variable is an associative array which is only supported in bash > 3. - if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then - if [ -n "${flagvalue}" ] ; then - flaghash[${flagname}]=${flagvalue} - elif [ -n "${words[ $((c+1)) ]}" ] ; then - flaghash[${flagname}]=${words[ $((c+1)) ]} - else - flaghash[${flagname}]="true" # pad "true" for bool flag - fi - fi - - # skip the argument to a two word flag - if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then - __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" - c=$((c+1)) - # if we are looking for a flags value, don't show commands - if [[ $c -eq $cword ]]; then - commands=() - fi - fi - - c=$((c+1)) - -} - -__%[1]s_handle_noun() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then - must_have_one_noun=() - elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then - must_have_one_noun=() - fi - - nouns+=("${words[c]}") - c=$((c+1)) -} - -__%[1]s_handle_command() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - local next_command - if [[ -n ${last_command} ]]; then - next_command="_${last_command}_${words[c]//:/__}" - else - if [[ $c -eq 0 ]]; then - next_command="_%[1]s_root_command" - else - next_command="_${words[c]//:/__}" - fi - fi - c=$((c+1)) - __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}" - declare -F "$next_command" >/dev/null && $next_command -} - -__%[1]s_handle_word() -{ - if [[ $c -ge $cword ]]; then - __%[1]s_handle_reply - return - fi - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - if [[ "${words[c]}" == -* ]]; then - __%[1]s_handle_flag - elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then - __%[1]s_handle_command - elif [[ $c -eq 0 ]]; then - __%[1]s_handle_command - elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then - # aliashash variable is an associative array which is only supported in bash > 3. 
- if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then - words[c]=${aliashash[${words[c]}]} - __%[1]s_handle_command - else - __%[1]s_handle_noun - fi - else - __%[1]s_handle_noun - fi - __%[1]s_handle_word -} - -`, name, ShellCompNoDescRequestCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) -} - -func writePostscript(buf io.StringWriter, name string) { - name = strings.ReplaceAll(name, ":", "__") - WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name)) - WriteStringAndCheck(buf, fmt.Sprintf(`{ - local cur prev words cword split - declare -A flaghash 2>/dev/null || : - declare -A aliashash 2>/dev/null || : - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -s || return - else - __%[1]s_init_completion -n "=" || return - fi - - local c=0 - local flag_parsing_disabled= - local flags=() - local two_word_flags=() - local local_nonpersistent_flags=() - local flags_with_completion=() - local flags_completion=() - local commands=("%[1]s") - local command_aliases=() - local must_have_one_flag=() - local must_have_one_noun=() - local has_completion_function="" - local last_command="" - local nouns=() - local noun_aliases=() - - __%[1]s_handle_word -} - -`, name)) - WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_%s %s -else - complete -o default -o nospace -F __start_%s %s -fi - -`, name, name, name, name)) - WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n") -} - -func writeCommands(buf io.StringWriter, cmd *Command) { - WriteStringAndCheck(buf, " commands=()\n") - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() && c != cmd.helpCommand { - continue - } - WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name())) - writeCmdAliases(buf, c) - } - WriteStringAndCheck(buf, "\n") -} - -func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) { - for key, value := range annotations { - switch key { - case BashCompFilenameExt: - WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - var ext string - if len(value) > 0 { - ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|") - } else { - ext = "_filedir" - } - WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) - case BashCompCustom: - WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - if len(value) > 0 { - handlers := strings.Join(value, "; ") - WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) - } else { - WriteStringAndCheck(buf, " flags_completion+=(:)\n") - } - case BashCompSubdirsInDir: - WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - var ext string - if len(value) == 1 { - ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0] - } else { - ext = "_filedir -d" - } - WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) - } - } -} - -const cbn = "\")\n" - -func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { - name := flag.Shorthand - format := " " - if len(flag.NoOptDefVal) == 0 { - format += "two_word_" - } - format += "flags+=(\"-%s" + cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, name)) - writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) -} - -func 
writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { - name := flag.Name - format := " flags+=(\"--%s" - if len(flag.NoOptDefVal) == 0 { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, name)) - if len(flag.NoOptDefVal) == 0 { - format = " two_word_flags+=(\"--%s" + cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, name)) - } - writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) -} - -func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { - name := flag.Name - format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn - if len(flag.NoOptDefVal) == 0 { - format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn - } - WriteStringAndCheck(buf, fmt.Sprintf(format, name)) - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand)) - } -} - -// prepareCustomAnnotationsForFlags setup annotations for go completions for registered flags -func prepareCustomAnnotationsForFlags(cmd *Command) { - flagCompletionMutex.RLock() - defer flagCompletionMutex.RUnlock() - for flag := range flagCompletionFunctions { - // Make sure the completion script calls the __*_go_custom_completion function for - // every registered flag. We need to do this here (and not when the flag was registered - // for completion) so that we can know the root command name for the prefix - // of ___go_custom_completion - if flag.Annotations == nil { - flag.Annotations = map[string][]string{} - } - flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())} - } -} - -func writeFlags(buf io.StringWriter, cmd *Command) { - prepareCustomAnnotationsForFlags(cmd) - WriteStringAndCheck(buf, ` flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - -`) - - if cmd.DisableFlagParsing { - WriteStringAndCheck(buf, " flag_parsing_disabled=1\n") - } - - localNonPersistentFlags := cmd.LocalNonPersistentFlags() - cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - writeFlag(buf, flag, cmd) - if len(flag.Shorthand) > 0 { - writeShortFlag(buf, flag, cmd) - } - // localNonPersistentFlags are used to stop the completion of subcommands when one is set - // if TraverseChildren is true we should allow to complete subcommands - if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren { - writeLocalNonPersistentFlag(buf, flag) - } - }) - cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - writeFlag(buf, flag, cmd) - if len(flag.Shorthand) > 0 { - writeShortFlag(buf, flag, cmd) - } - }) - - WriteStringAndCheck(buf, "\n") -} - -func writeRequiredFlag(buf io.StringWriter, cmd *Command) { - WriteStringAndCheck(buf, " must_have_one_flag=()\n") - flags := cmd.NonInheritedFlags() - flags.VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) - } - } - } - }) -} - -func writeRequiredNouns(buf io.StringWriter, cmd *Command) { - WriteStringAndCheck(buf, " must_have_one_noun=()\n") - 
sort.Strings(cmd.ValidArgs) - for _, value := range cmd.ValidArgs { - // Remove any description that may be included following a tab character. - // Descriptions are not supported by bash completion. - value = strings.Split(value, "\t")[0] - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) - } - if cmd.ValidArgsFunction != nil { - WriteStringAndCheck(buf, " has_completion_function=1\n") - } -} - -func writeCmdAliases(buf io.StringWriter, cmd *Command) { - if len(cmd.Aliases) == 0 { - return - } - - sort.Strings(cmd.Aliases) - - WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n")) - for _, value := range cmd.Aliases { - WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value)) - WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) - } - WriteStringAndCheck(buf, ` fi`) - WriteStringAndCheck(buf, "\n") -} -func writeArgAliases(buf io.StringWriter, cmd *Command) { - WriteStringAndCheck(buf, " noun_aliases=()\n") - sort.Strings(cmd.ArgAliases) - for _, value := range cmd.ArgAliases { - WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value)) - } -} - -func gen(buf io.StringWriter, cmd *Command) { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() && c != cmd.helpCommand { - continue - } - gen(buf, c) - } - commandName := cmd.CommandPath() - commandName = strings.ReplaceAll(commandName, " ", "_") - commandName = strings.ReplaceAll(commandName, ":", "__") - - if cmd.Root() == cmd { - WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName)) - } else { - WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName)) - } - - WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName)) - WriteStringAndCheck(buf, "\n") - WriteStringAndCheck(buf, " command_aliases=()\n") - WriteStringAndCheck(buf, "\n") - - writeCommands(buf, cmd) - writeFlags(buf, cmd) - writeRequiredFlag(buf, cmd) - writeRequiredNouns(buf, cmd) - writeArgAliases(buf, cmd) - WriteStringAndCheck(buf, "}\n\n") -} - -// GenBashCompletion generates bash completion file and writes to the passed writer. -func (c *Command) GenBashCompletion(w io.Writer) error { - buf := new(bytes.Buffer) - writePreamble(buf, c.Name()) - if len(c.BashCompletionFunction) > 0 { - buf.WriteString(c.BashCompletionFunction + "\n") - } - gen(buf, c) - writePostscript(buf, c.Name()) - - _, err := buf.WriteTo(w) - return err -} - -func nonCompletableFlag(flag *pflag.Flag) bool { - return flag.Hidden || len(flag.Deprecated) > 0 -} - -// GenBashCompletionFile generates bash completion file. -func (c *Command) GenBashCompletionFile(filename string) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenBashCompletion(outFile) -} diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go deleted file mode 100644 index 1cce5c329c..0000000000 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" -) - -func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genBashComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -func genBashComp(buf io.StringWriter, name string, includeDesc bool) { - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - - WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*- - -__%[1]s_debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Macs have bash3 for which the bash-completion package doesn't include -# _init_completion. This is a minimal version of that function. -__%[1]s_init_completion() -{ - COMPREPLY=() - _get_comp_words_by_ref "$@" cur prev words cword -} - -# This function calls the %[1]s program to obtain the completion -# results and the directive. It fills the 'out' and 'directive' vars. -__%[1]s_get_completion_results() { - local requestComp lastParam lastChar args - - # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows handling aliases - args=("${words[@]:1}") - requestComp="${words[0]} %[2]s ${args[*]}" - - lastParam=${words[$((${#words[@]}-1))]} - lastChar=${lastParam:$((${#lastParam}-1)):1} - __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" - - if [[ -z ${cur} && ${lastChar} != = ]]; then - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go method. - __%[1]s_debug "Adding extra empty parameter" - requestComp="${requestComp} ''" - fi - - # When completing a flag with an = (e.g., %[1]s -n=) - # bash focuses on the part after the =, so we need to remove - # the flag part from $cur - if [[ ${cur} == -*=* ]]; then - cur="${cur#*=}" - fi - - __%[1]s_debug "Calling ${requestComp}" - # Use eval to handle any environment variables and such - out=$(eval "${requestComp}" 2>/dev/null) - - # Extract the directive integer at the very end of the output following a colon (:) - directive=${out##*:} - # Remove the directive - out=${out%%:*} - if [[ ${directive} == "${out}" ]]; then - # There is not directive specified - directive=0 - fi - __%[1]s_debug "The completion directive is: ${directive}" - __%[1]s_debug "The completions are: ${out}" -} - -__%[1]s_process_completion_results() { - local shellCompDirectiveError=%[3]d - local shellCompDirectiveNoSpace=%[4]d - local shellCompDirectiveNoFileComp=%[5]d - local shellCompDirectiveFilterFileExt=%[6]d - local shellCompDirectiveFilterDirs=%[7]d - local shellCompDirectiveKeepOrder=%[8]d - - if (((directive & shellCompDirectiveError) != 0)); then - # Error code. No completion. 
- __%[1]s_debug "Received error from custom completion go code" - return - else - if (((directive & shellCompDirectiveNoSpace) != 0)); then - if [[ $(type -t compopt) == builtin ]]; then - __%[1]s_debug "Activating no space" - compopt -o nospace - else - __%[1]s_debug "No space directive not supported in this version of bash" - fi - fi - if (((directive & shellCompDirectiveKeepOrder) != 0)); then - if [[ $(type -t compopt) == builtin ]]; then - # no sort isn't supported for bash less than < 4.4 - if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then - __%[1]s_debug "No sort directive not supported in this version of bash" - else - __%[1]s_debug "Activating keep order" - compopt -o nosort - fi - else - __%[1]s_debug "No sort directive not supported in this version of bash" - fi - fi - if (((directive & shellCompDirectiveNoFileComp) != 0)); then - if [[ $(type -t compopt) == builtin ]]; then - __%[1]s_debug "Activating no file completion" - compopt +o default - else - __%[1]s_debug "No file completion directive not supported in this version of bash" - fi - fi - fi - - # Separate activeHelp from normal completions - local completions=() - local activeHelp=() - __%[1]s_extract_activeHelp - - if (((directive & shellCompDirectiveFilterFileExt) != 0)); then - # File extension filtering - local fullFilter filter filteringCmd - - # Do not use quotes around the $completions variable or else newline - # characters will be kept. - for filter in ${completions[*]}; do - fullFilter+="$filter|" - done - - filteringCmd="_filedir $fullFilter" - __%[1]s_debug "File filtering command: $filteringCmd" - $filteringCmd - elif (((directive & shellCompDirectiveFilterDirs) != 0)); then - # File completion for directories only - - local subdir - subdir=${completions[0]} - if [[ -n $subdir ]]; then - __%[1]s_debug "Listing directories in $subdir" - pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return - else - __%[1]s_debug "Listing directories in ." - _filedir -d - fi - else - __%[1]s_handle_completion_types - fi - - __%[1]s_handle_special_char "$cur" : - __%[1]s_handle_special_char "$cur" = - - # Print the activeHelp statements before we finish - if ((${#activeHelp[*]} != 0)); then - printf "\n"; - printf "%%s\n" "${activeHelp[@]}" - printf "\n" - - # The prompt format is only available from bash 4.4. - # We test if it is available before using it. - if (x=${PS1@P}) 2> /dev/null; then - printf "%%s" "${PS1@P}${COMP_LINE[@]}" - else - # Can't print the prompt. Just print the - # text the user had typed, it is workable enough. - printf "%%s" "${COMP_LINE[@]}" - fi - fi -} - -# Separate activeHelp lines from real completions. -# Fills the $activeHelp and $completions arrays. 
-__%[1]s_extract_activeHelp() { - local activeHelpMarker="%[9]s" - local endIndex=${#activeHelpMarker} - - while IFS='' read -r comp; do - if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then - comp=${comp:endIndex} - __%[1]s_debug "ActiveHelp found: $comp" - if [[ -n $comp ]]; then - activeHelp+=("$comp") - fi - else - # Not an activeHelp line but a normal completion - completions+=("$comp") - fi - done <<<"${out}" -} - -__%[1]s_handle_completion_types() { - __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE" - - case $COMP_TYPE in - 37|42) - # Type: menu-complete/menu-complete-backward and insert-completions - # If the user requested inserting one completion at a time, or all - # completions at once on the command-line we must remove the descriptions. - # https://github.com/spf13/cobra/issues/1508 - local tab=$'\t' comp - while IFS='' read -r comp; do - [[ -z $comp ]] && continue - # Strip any description - comp=${comp%%%%$tab*} - # Only consider the completions that match - if [[ $comp == "$cur"* ]]; then - COMPREPLY+=("$comp") - fi - done < <(printf "%%s\n" "${completions[@]}") - ;; - - *) - # Type: complete (normal completion) - __%[1]s_handle_standard_completion_case - ;; - esac -} - -__%[1]s_handle_standard_completion_case() { - local tab=$'\t' comp - - # Short circuit to optimize if we don't have descriptions - if [[ "${completions[*]}" != *$tab* ]]; then - IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur") - return 0 - fi - - local longest=0 - local compline - # Look for the longest completion so that we can format things nicely - while IFS='' read -r compline; do - [[ -z $compline ]] && continue - # Strip any description before checking the length - comp=${compline%%%%$tab*} - # Only consider the completions that match - [[ $comp == "$cur"* ]] || continue - COMPREPLY+=("$compline") - if ((${#comp}>longest)); then - longest=${#comp} - fi - done < <(printf "%%s\n" "${completions[@]}") - - # If there is a single completion left, remove the description text - if ((${#COMPREPLY[*]} == 1)); then - __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" - comp="${COMPREPLY[0]%%%%$tab*}" - __%[1]s_debug "Removed description from single completion, which is now: ${comp}" - COMPREPLY[0]=$comp - else # Format the descriptions - __%[1]s_format_comp_descriptions $longest - fi -} - -__%[1]s_handle_special_char() -{ - local comp="$1" - local char=$2 - if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then - local word=${comp%%"${comp##*${char}}"} - local idx=${#COMPREPLY[*]} - while ((--idx >= 0)); do - COMPREPLY[idx]=${COMPREPLY[idx]#"$word"} - done - fi -} - -__%[1]s_format_comp_descriptions() -{ - local tab=$'\t' - local comp desc maxdesclength - local longest=$1 - - local i ci - for ci in ${!COMPREPLY[*]}; do - comp=${COMPREPLY[ci]} - # Properly format the description string which follows a tab character if there is one - if [[ "$comp" == *$tab* ]]; then - __%[1]s_debug "Original comp: $comp" - desc=${comp#*$tab} - comp=${comp%%%%$tab*} - - # $COLUMNS stores the current shell width. - # Remove an extra 4 because we add 2 spaces and 2 parentheses. - maxdesclength=$(( COLUMNS - longest - 4 )) - - # Make sure we can fit a description of at least 8 characters - # if we are to align the descriptions. 
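The activeHelp lines separated out here are produced on the Go side by prefixing completions with the marker interpolated as `%[9]s`. Cobra ships an AppendActiveHelp helper for this in active_help.go, which is not part of this hunk; the sketch below assumes a cobra version that provides it (v1.5+):

```go
package main

import "github.com/spf13/cobra"

func userCompletions(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	comps := []string{"alice", "bob"}
	// AppendActiveHelp prefixes the message with the activeHelp marker that
	// __<prog>_extract_activeHelp above strips back out for display.
	comps = cobra.AppendActiveHelp(comps, "you can also pass a raw user ID")
	return comps, cobra.ShellCompDirectiveNoFileComp
}

func main() {
	cmd := &cobra.Command{
		Use:               "deactivate [user]",
		Run:               func(cmd *cobra.Command, args []string) {},
		ValidArgsFunction: userCompletions,
	}
	cobra.CheckErr(cmd.Execute())
}
```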
- if ((maxdesclength > 8)); then - # Add the proper number of spaces to align the descriptions - for ((i = ${#comp} ; i < longest ; i++)); do - comp+=" " - done - else - # Don't pad the descriptions so we can fit more text after the completion - maxdesclength=$(( COLUMNS - ${#comp} - 4 )) - fi - - # If there is enough space for any description text, - # truncate the descriptions that are too long for the shell width - if ((maxdesclength > 0)); then - if ((${#desc} > maxdesclength)); then - desc=${desc:0:$(( maxdesclength - 1 ))} - desc+="…" - fi - comp+=" ($desc)" - fi - COMPREPLY[ci]=$comp - __%[1]s_debug "Final comp: $comp" - fi - done -} - -__start_%[1]s() -{ - local cur prev words cword split - - COMPREPLY=() - - # Call _init_completion from the bash-completion package - # to prepare the arguments properly - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -n =: || return - else - __%[1]s_init_completion -n =: || return - fi - - __%[1]s_debug - __%[1]s_debug "========= starting completion logic ==========" - __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword" - - # The user could have moved the cursor backwards on the command-line. - # We need to trigger completion from the $cword location, so we need - # to truncate the command-line ($words) up to the $cword location. - words=("${words[@]:0:$cword+1}") - __%[1]s_debug "Truncated words[*]: ${words[*]}," - - local out directive - __%[1]s_get_completion_results - __%[1]s_process_completion_results -} - -if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_%[1]s %[1]s -else - complete -o default -o nospace -F __start_%[1]s %[1]s -fi - -# ex: ts=4 sw=4 et filetype=sh -`, name, compCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, - activeHelpMarker)) -} - -// GenBashCompletionFileV2 generates Bash completion version 2. -func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenBashCompletionV2(outFile, includeDesc) -} - -// GenBashCompletionV2 generates Bash completion file version 2 -// and writes it to the passed writer. -func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error { - return c.genBashCompletion(w, includeDesc) -} diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go deleted file mode 100644 index a6b160ce53..0000000000 --- a/vendor/github.com/spf13/cobra/cobra.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
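Applications rarely call these generators directly; cobra's default `completion bash` subcommand does it for them. Still, a direct use of the V2 API deleted above looks like this (program name hypothetical):

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "myapp", Run: func(cmd *cobra.Command, args []string) {}}
	// Write the V2 bash script to stdout, keeping descriptions; users
	// typically redirect this into a bash-completion directory.
	cobra.CheckErr(root.GenBashCompletionV2(os.Stdout, true))
}
```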
- -// Commands similar to git, go tools and other modern CLI tools -// inspired by go, go-Commander, gh and subcommand - -package cobra - -import ( - "fmt" - "io" - "os" - "reflect" - "strconv" - "strings" - "text/template" - "time" - "unicode" -) - -var templateFuncs = template.FuncMap{ - "trim": strings.TrimSpace, - "trimRightSpace": trimRightSpace, - "trimTrailingWhitespaces": trimRightSpace, - "appendIfNotPresent": appendIfNotPresent, - "rpad": rpad, - "gt": Gt, - "eq": Eq, -} - -var initializers []func() -var finalizers []func() - -const ( - defaultPrefixMatching = false - defaultCommandSorting = true - defaultCaseInsensitive = false - defaultTraverseRunHooks = false -) - -// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing -// to automatically enable in CLI tools. -// Set this to true to enable it. -var EnablePrefixMatching = defaultPrefixMatching - -// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. -// To disable sorting, set it to false. -var EnableCommandSorting = defaultCommandSorting - -// EnableCaseInsensitive allows case-insensitive commands names. (case sensitive by default) -var EnableCaseInsensitive = defaultCaseInsensitive - -// EnableTraverseRunHooks executes persistent pre-run and post-run hooks from all parents. -// By default this is disabled, which means only the first run hook to be found is executed. -var EnableTraverseRunHooks = defaultTraverseRunHooks - -// MousetrapHelpText enables an information splash screen on Windows -// if the CLI is started from explorer.exe. -// To disable the mousetrap, just set this variable to blank string (""). -// Works only on Microsoft Windows. -var MousetrapHelpText = `This is a command line tool. - -You need to open cmd.exe and run it from there. -` - -// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows -// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed. -// To disable the mousetrap, just set MousetrapHelpText to blank string (""). -// Works only on Microsoft Windows. -var MousetrapDisplayDuration = 5 * time.Second - -// AddTemplateFunc adds a template function that's available to Usage and Help -// template generation. -func AddTemplateFunc(name string, tmplFunc interface{}) { - templateFuncs[name] = tmplFunc -} - -// AddTemplateFuncs adds multiple template functions that are available to Usage and -// Help template generation. -func AddTemplateFuncs(tmplFuncs template.FuncMap) { - for k, v := range tmplFuncs { - templateFuncs[k] = v - } -} - -// OnInitialize sets the passed functions to be run when each command's -// Execute method is called. -func OnInitialize(y ...func()) { - initializers = append(initializers, y...) -} - -// OnFinalize sets the passed functions to be run when each command's -// Execute method is terminated. -func OnFinalize(y ...func()) { - finalizers = append(finalizers, y...) -} - -// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, -// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as -// ints and then compared. 
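A sketch of the package-level switches and init/finalize hooks declared above, with their effects noted inline (the `myapp` command is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Package-level knobs from cobra.go; all default to off except sorting.
	cobra.EnablePrefixMatching = true   // e.g. let "myapp ver" match a "version" subcommand
	cobra.EnableCommandSorting = false  // keep AddCommand order in help output
	cobra.EnableTraverseRunHooks = true // run every parent's Persistent*Run hook, not just the first found

	cobra.OnInitialize(func() { fmt.Println("runs before each command's Run") })
	cobra.OnFinalize(func() { fmt.Println("runs after each command's Run") })

	root := &cobra.Command{Use: "myapp", Run: func(cmd *cobra.Command, args []string) {}}
	cobra.CheckErr(root.Execute())
}
```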
-func Gt(a interface{}, b interface{}) bool { - var left, right int64 - av := reflect.ValueOf(a) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - left = int64(av.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - left = av.Int() - case reflect.String: - left, _ = strconv.ParseInt(av.String(), 10, 64) - } - - bv := reflect.ValueOf(b) - - switch bv.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - right = int64(bv.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - right = bv.Int() - case reflect.String: - right, _ = strconv.ParseInt(bv.String(), 10, 64) - } - - return left > right -} - -// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. -func Eq(a interface{}, b interface{}) bool { - av := reflect.ValueOf(a) - bv := reflect.ValueOf(b) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - panic("Eq called on unsupported type") - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return av.Int() == bv.Int() - case reflect.String: - return av.String() == bv.String() - } - return false -} - -func trimRightSpace(s string) string { - return strings.TrimRightFunc(s, unicode.IsSpace) -} - -// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s. -func appendIfNotPresent(s, stringToAppend string) string { - if strings.Contains(s, stringToAppend) { - return s - } - return s + " " + stringToAppend -} - -// rpad adds padding to the right of a string. -func rpad(s string, padding int) string { - formattedString := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(formattedString, s) -} - -// tmpl executes the given template text on data, writing the result to w. -func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(templateFuncs) - template.Must(t.Parse(text)) - return t.Execute(w, data) -} - -// ld compares two strings and returns the levenshtein distance between them. -func ld(s, t string, ignoreCase bool) int { - if ignoreCase { - s = strings.ToLower(s) - t = strings.ToLower(t) - } - d := make([][]int, len(s)+1) - for i := range d { - d[i] = make([]int, len(t)+1) - } - for i := range d { - d[i][0] = i - } - for j := range d[0] { - d[0][j] = j - } - for j := 1; j <= len(t); j++ { - for i := 1; i <= len(s); i++ { - if s[i-1] == t[j-1] { - d[i][j] = d[i-1][j-1] - } else { - min := d[i-1][j] - if d[i][j-1] < min { - min = d[i][j-1] - } - if d[i-1][j-1] < min { - min = d[i-1][j-1] - } - d[i][j] = min + 1 - } - } - - } - return d[len(s)][len(t)] -} - -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing. -func CheckErr(msg interface{}) { - if msg != nil { - fmt.Fprintln(os.Stderr, "Error:", msg) - os.Exit(1) - } -} - -// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil. 
-func WriteStringAndCheck(b io.StringWriter, s string) { - _, err := b.WriteString(s) - CheckErr(err) -} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go deleted file mode 100644 index 2fbe6c131a..0000000000 --- a/vendor/github.com/spf13/cobra/command.go +++ /dev/null @@ -1,1885 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. -// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. -package cobra - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - flag "github.com/spf13/pflag" -) - -const ( - FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" - CommandDisplayNameAnnotation = "cobra_annotation_command_display_name" -) - -// FParseErrWhitelist configures Flag parse errors to be ignored -type FParseErrWhitelist flag.ParseErrorsWhitelist - -// Group Structure to manage groups for commands -type Group struct { - ID string - Title string -} - -// Command is just that, a command for your application. -// E.g. 'go run ...' - 'run' is the command. Cobra requires -// you to define the usage and description as part of your command -// definition to ensure usability. -type Command struct { - // Use is the one-line usage message. - // Recommended syntax is as follows: - // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. - // ... indicates that you can specify multiple values for the previous argument. - // | indicates mutually exclusive information. You can use the argument to the left of the separator or the - // argument to the right of the separator. You cannot use both arguments in a single use of the command. - // { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are - // optional, they are enclosed in brackets ([ ]). - // Example: add [-F file | -D dir]... [-f format] profile - Use string - - // Aliases is an array of aliases that can be used instead of the first word in Use. - Aliases []string - - // SuggestFor is an array of command names for which this command will be suggested - - // similar to aliases but only suggests. - SuggestFor []string - - // Short is the short description shown in the 'help' output. - Short string - - // The group id under which this subcommand is grouped in the 'help' output of its parent. - GroupID string - - // Long is the long message shown in the 'help ' output. - Long string - - // Example is examples of how to use the command. - Example string - - // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions - ValidArgs []string - // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. 
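A command definition following the Use syntax documented above ([ ] optional, ... repeatable, | mutually exclusive); the add/profile wording mirrors the doc comment's own example, while `myapp` is hypothetical:

```go
package main

import "github.com/spf13/cobra"

func main() {
	add := &cobra.Command{
		// Use follows the documented syntax: [] optional, ... repeatable,
		// | mutually exclusive alternatives.
		Use:     "add [-F file | -D dir]... [-f format] profile",
		Aliases: []string{"a"},
		Short:   "Add a profile",
		Long:    "Add a profile, reading entries from files or directories.",
		Example: "  myapp add -F creds.txt work",
		Args:    cobra.ExactArgs(1),
		Run:     func(cmd *cobra.Command, args []string) {},
	}
	root := &cobra.Command{Use: "myapp"}
	root.AddCommand(add)
	cobra.CheckErr(root.Execute())
}
```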
- // It is a dynamic version of using ValidArgs. - // Only one of ValidArgs and ValidArgsFunction can be used for a command. - ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) - - // Expected arguments - Args PositionalArgs - - // ArgAliases is List of aliases for ValidArgs. - // These are not suggested to the user in the shell completion, - // but accepted if entered manually. - ArgAliases []string - - // BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator. - // For portability with other shells, it is recommended to instead use ValidArgsFunction - BashCompletionFunction string - - // Deprecated defines, if this command is deprecated and should print this string when used. - Deprecated string - - // Annotations are key/value pairs that can be used by applications to identify or - // group commands or set special options. - Annotations map[string]string - - // Version defines the version for this command. If this value is non-empty and the command does not - // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, - // will print content of the "Version" variable. A shorthand "v" flag will also be added if the - // command does not define one. - Version string - - // The *Run functions are executed in the following order: - // * PersistentPreRun() - // * PreRun() - // * Run() - // * PostRun() - // * PersistentPostRun() - // All functions get the same args, the arguments after the command name. - // The *PreRun and *PostRun functions will only be executed if the Run function of the current - // command has been declared. - // - // PersistentPreRun: children of this command will inherit and execute. - PersistentPreRun func(cmd *Command, args []string) - // PersistentPreRunE: PersistentPreRun but returns an error. - PersistentPreRunE func(cmd *Command, args []string) error - // PreRun: children of this command will not inherit. - PreRun func(cmd *Command, args []string) - // PreRunE: PreRun but returns an error. - PreRunE func(cmd *Command, args []string) error - // Run: Typically the actual work function. Most commands will only implement this. - Run func(cmd *Command, args []string) - // RunE: Run but returns an error. - RunE func(cmd *Command, args []string) error - // PostRun: run after the Run command. - PostRun func(cmd *Command, args []string) - // PostRunE: PostRun but returns an error. - PostRunE func(cmd *Command, args []string) error - // PersistentPostRun: children of this command will inherit and execute after PostRun. - PersistentPostRun func(cmd *Command, args []string) - // PersistentPostRunE: PersistentPostRun but returns an error. - PersistentPostRunE func(cmd *Command, args []string) error - - // groups for subcommands - commandgroups []*Group - - // args is actual args parsed from flags. - args []string - // flagErrorBuf contains all error messages from pflag. - flagErrorBuf *bytes.Buffer - // flags is full set of flags. - flags *flag.FlagSet - // pflags contains persistent flags. - pflags *flag.FlagSet - // lflags contains local flags. - lflags *flag.FlagSet - // iflags contains inherited flags. - iflags *flag.FlagSet - // parentsPflags is all persistent flags of cmd's parents. 
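The *Run hook ordering documented above, made concrete (hypothetical myapp/sub commands; this prints 1 through 4 in order, with PersistentPreRun inherited from the root):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use:              "myapp",
		PersistentPreRun: func(cmd *cobra.Command, args []string) { fmt.Println("1 persistent pre") },
	}
	sub := &cobra.Command{
		Use:     "sub",
		PreRun:  func(cmd *cobra.Command, args []string) { fmt.Println("2 pre") },
		Run:     func(cmd *cobra.Command, args []string) { fmt.Println("3 run") },
		PostRun: func(cmd *cobra.Command, args []string) { fmt.Println("4 post") },
	}
	root.AddCommand(sub)
	root.SetArgs([]string{"sub"})
	cobra.CheckErr(root.Execute())
}
```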
- parentsPflags *flag.FlagSet - // globNormFunc is the global normalization function - // that we can use on every pflag set and children commands - globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName - - // usageFunc is usage func defined by user. - usageFunc func(*Command) error - // usageTemplate is usage template defined by user. - usageTemplate string - // flagErrorFunc is func defined by user and it's called when the parsing of - // flags returns an error. - flagErrorFunc func(*Command, error) error - // helpTemplate is help template defined by user. - helpTemplate string - // helpFunc is help func defined by user. - helpFunc func(*Command, []string) - // helpCommand is command with usage 'help'. If it's not defined by user, - // cobra uses default help command. - helpCommand *Command - // helpCommandGroupID is the group id for the helpCommand - helpCommandGroupID string - - // completionCommandGroupID is the group id for the completion command - completionCommandGroupID string - - // versionTemplate is the version template defined by user. - versionTemplate string - - // errPrefix is the error message prefix defined by user. - errPrefix string - - // inReader is a reader defined by the user that replaces stdin - inReader io.Reader - // outWriter is a writer defined by the user that replaces stdout - outWriter io.Writer - // errWriter is a writer defined by the user that replaces stderr - errWriter io.Writer - - // FParseErrWhitelist flag parse errors to be ignored - FParseErrWhitelist FParseErrWhitelist - - // CompletionOptions is a set of options to control the handling of shell completion - CompletionOptions CompletionOptions - - // commandsAreSorted defines, if command slice are sorted or not. - commandsAreSorted bool - // commandCalledAs is the name or alias value used to call this command. - commandCalledAs struct { - name string - called bool - } - - ctx context.Context - - // commands is the list of commands supported by this program. - commands []*Command - // parent is a parent command for this command. - parent *Command - // Max lengths of commands' string lengths for use in padding. - commandsMaxUseLen int - commandsMaxCommandPathLen int - commandsMaxNameLen int - - // TraverseChildren parses flags on all parents before executing child command. - TraverseChildren bool - - // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. - Hidden bool - - // SilenceErrors is an option to quiet errors down stream. - SilenceErrors bool - - // SilenceUsage is an option to silence usage when an error occurs. - SilenceUsage bool - - // DisableFlagParsing disables the flag parsing. - // If this is true all flags will be passed to the command as arguments. - DisableFlagParsing bool - - // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") - // will be printed by generating docs for this command. - DisableAutoGenTag bool - - // DisableFlagsInUseLine will disable the addition of [flags] to the usage - // line of a command when printing help or generating docs - DisableFlagsInUseLine bool - - // DisableSuggestions disables the suggestions based on Levenshtein distance - // that go along with 'unknown command' messages. - DisableSuggestions bool - - // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. - // Must be > 0. - SuggestionsMinimumDistance int -} - -// Context returns underlying command context. 
If command was executed -// with ExecuteContext or the context was set with SetContext, the -// previously set context will be returned. Otherwise, nil is returned. -// -// Notice that a call to Execute and ExecuteC will replace a nil context of -// a command with a context.Background, so a background context will be -// returned by Context after one of these functions has been called. -func (c *Command) Context() context.Context { - return c.ctx -} - -// SetContext sets context for the command. This context will be overwritten by -// Command.ExecuteContext or Command.ExecuteContextC. -func (c *Command) SetContext(ctx context.Context) { - c.ctx = ctx -} - -// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden -// particularly useful when testing. -func (c *Command) SetArgs(a []string) { - c.args = a -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -// Deprecated: Use SetOut and/or SetErr instead -func (c *Command) SetOutput(output io.Writer) { - c.outWriter = output - c.errWriter = output -} - -// SetOut sets the destination for usage messages. -// If newOut is nil, os.Stdout is used. -func (c *Command) SetOut(newOut io.Writer) { - c.outWriter = newOut -} - -// SetErr sets the destination for error messages. -// If newErr is nil, os.Stderr is used. -func (c *Command) SetErr(newErr io.Writer) { - c.errWriter = newErr -} - -// SetIn sets the source for input data -// If newIn is nil, os.Stdin is used. -func (c *Command) SetIn(newIn io.Reader) { - c.inReader = newIn -} - -// SetUsageFunc sets usage function. Usage can be defined by application. -func (c *Command) SetUsageFunc(f func(*Command) error) { - c.usageFunc = f -} - -// SetUsageTemplate sets usage template. Can be defined by Application. -func (c *Command) SetUsageTemplate(s string) { - c.usageTemplate = s -} - -// SetFlagErrorFunc sets a function to generate an error when flag parsing -// fails. -func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) { - c.flagErrorFunc = f -} - -// SetHelpFunc sets help function. Can be defined by Application. -func (c *Command) SetHelpFunc(f func(*Command, []string)) { - c.helpFunc = f -} - -// SetHelpCommand sets help command. -func (c *Command) SetHelpCommand(cmd *Command) { - c.helpCommand = cmd -} - -// SetHelpCommandGroupID sets the group id of the help command. -func (c *Command) SetHelpCommandGroupID(groupID string) { - if c.helpCommand != nil { - c.helpCommand.GroupID = groupID - } - // helpCommandGroupID is used if no helpCommand is defined by the user - c.helpCommandGroupID = groupID -} - -// SetCompletionCommandGroupID sets the group id of the completion command. -func (c *Command) SetCompletionCommandGroupID(groupID string) { - // completionCommandGroupID is used if no completion command is defined by the user - c.Root().completionCommandGroupID = groupID -} - -// SetHelpTemplate sets help template to be used. Application can use it to set custom template. -func (c *Command) SetHelpTemplate(s string) { - c.helpTemplate = s -} - -// SetVersionTemplate sets version template to be used. Application can use it to set custom template. -func (c *Command) SetVersionTemplate(s string) { - c.versionTemplate = s -} - -// SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix. 
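SetArgs, SetOut and SetErr are the usual combination for exercising a command in tests without touching os.Args or the real streams; a minimal sketch:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use: "myapp",
		RunE: func(cmd *cobra.Command, args []string) error {
			cmd.Println("got args:", args) // goes to the writer set below
			return nil
		},
	}
	var out bytes.Buffer
	root.SetOut(&out)               // capture normal/usage output
	root.SetErr(&out)               // capture error output
	root.SetArgs([]string{"hello"}) // replaces the default os.Args[1:]
	cobra.CheckErr(root.Execute())
	fmt.Print(out.String()) // "got args: [hello]"
}
```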
-func (c *Command) SetErrPrefix(s string) { - c.errPrefix = s -} - -// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. -// The user should not have a cyclic dependency on commands. -func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { - c.Flags().SetNormalizeFunc(n) - c.PersistentFlags().SetNormalizeFunc(n) - c.globNormFunc = n - - for _, command := range c.commands { - command.SetGlobalNormalizationFunc(n) - } -} - -// OutOrStdout returns output to stdout. -func (c *Command) OutOrStdout() io.Writer { - return c.getOut(os.Stdout) -} - -// OutOrStderr returns output to stderr -func (c *Command) OutOrStderr() io.Writer { - return c.getOut(os.Stderr) -} - -// ErrOrStderr returns output to stderr -func (c *Command) ErrOrStderr() io.Writer { - return c.getErr(os.Stderr) -} - -// InOrStdin returns input to stdin -func (c *Command) InOrStdin() io.Reader { - return c.getIn(os.Stdin) -} - -func (c *Command) getOut(def io.Writer) io.Writer { - if c.outWriter != nil { - return c.outWriter - } - if c.HasParent() { - return c.parent.getOut(def) - } - return def -} - -func (c *Command) getErr(def io.Writer) io.Writer { - if c.errWriter != nil { - return c.errWriter - } - if c.HasParent() { - return c.parent.getErr(def) - } - return def -} - -func (c *Command) getIn(def io.Reader) io.Reader { - if c.inReader != nil { - return c.inReader - } - if c.HasParent() { - return c.parent.getIn(def) - } - return def -} - -// UsageFunc returns either the function set by SetUsageFunc for this command -// or a parent, or it returns a default usage function. -func (c *Command) UsageFunc() (f func(*Command) error) { - if c.usageFunc != nil { - return c.usageFunc - } - if c.HasParent() { - return c.Parent().UsageFunc() - } - return func(c *Command) error { - c.mergePersistentFlags() - err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) - if err != nil { - c.PrintErrln(err) - } - return err - } -} - -// Usage puts out the usage for the command. -// Used when a user provides invalid input. -// Can be defined by user by overriding UsageFunc. -func (c *Command) Usage() error { - return c.UsageFunc()(c) -} - -// HelpFunc returns either the function set by SetHelpFunc for this command -// or a parent, or it returns a function with default help behavior. -func (c *Command) HelpFunc() func(*Command, []string) { - if c.helpFunc != nil { - return c.helpFunc - } - if c.HasParent() { - return c.Parent().HelpFunc() - } - return func(c *Command, a []string) { - c.mergePersistentFlags() - // The help should be sent to stdout - // See https://github.com/spf13/cobra/issues/1002 - err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) - if err != nil { - c.PrintErrln(err) - } - } -} - -// Help puts out the help for the command. -// Used when a user calls help [command]. -// Can be defined by user by overriding HelpFunc. -func (c *Command) Help() error { - c.HelpFunc()(c, []string{}) - return nil -} - -// UsageString returns usage string. -func (c *Command) UsageString() string { - // Storing normal writers - tmpOutput := c.outWriter - tmpErr := c.errWriter - - bb := new(bytes.Buffer) - c.outWriter = bb - c.errWriter = bb - - CheckErr(c.Usage()) - - // Setting things back to normal - c.outWriter = tmpOutput - c.errWriter = tmpErr - - return bb.String() -} - -// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this -// command or a parent, or it returns a function which returns the original -// error. 
-func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { - if c.flagErrorFunc != nil { - return c.flagErrorFunc - } - - if c.HasParent() { - return c.parent.FlagErrorFunc() - } - return func(c *Command, err error) error { - return err - } -} - -var minUsagePadding = 25 - -// UsagePadding return padding for the usage. -func (c *Command) UsagePadding() int { - if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { - return minUsagePadding - } - return c.parent.commandsMaxUseLen -} - -var minCommandPathPadding = 11 - -// CommandPathPadding return padding for the command path. -func (c *Command) CommandPathPadding() int { - if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { - return minCommandPathPadding - } - return c.parent.commandsMaxCommandPathLen -} - -var minNamePadding = 11 - -// NamePadding returns padding for the name. -func (c *Command) NamePadding() int { - if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { - return minNamePadding - } - return c.parent.commandsMaxNameLen -} - -// UsageTemplate returns usage template for the command. -func (c *Command) UsageTemplate() string { - if c.usageTemplate != "" { - return c.usageTemplate - } - - if c.HasParent() { - return c.parent.UsageTemplate() - } - return `Usage:{{if .Runnable}} - {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} - {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} - -Aliases: - {{.NameAndAliases}}{{end}}{{if .HasExample}} - -Examples: -{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} - -Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} - -{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} - -Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} - -Global Flags: -{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} - -Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - -Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} -` -} - -// HelpTemplate return help template for the command. -func (c *Command) HelpTemplate() string { - if c.helpTemplate != "" { - return c.helpTemplate - } - - if c.HasParent() { - return c.parent.HelpTemplate() - } - return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} - -{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` -} - -// VersionTemplate return version template for the command. 
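Overriding the default usage template shown above; children inherit the override via the parent walk in UsageTemplate. A small sketch reusing the template funcs registered in cobra.go (trimTrailingWhitespaces); the command name is hypothetical:

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{Use: "myapp", Run: func(cmd *cobra.Command, args []string) {}}
	// The default template above documents the available fields
	// ({{.UseLine}}, {{.CommandPath}}, rpad, etc.).
	root.SetUsageTemplate(`Usage: {{.UseLine}}

Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}
`)
	cobra.CheckErr(root.Execute())
}
```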
-func (c *Command) VersionTemplate() string { - if c.versionTemplate != "" { - return c.versionTemplate - } - - if c.HasParent() { - return c.parent.VersionTemplate() - } - return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} -` -} - -// ErrPrefix return error message prefix for the command -func (c *Command) ErrPrefix() string { - if c.errPrefix != "" { - return c.errPrefix - } - - if c.HasParent() { - return c.parent.ErrPrefix() - } - return "Error:" -} - -func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { - flag := fs.Lookup(name) - if flag == nil { - return false - } - return flag.NoOptDefVal != "" -} - -func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool { - if len(name) == 0 { - return false - } - - flag := fs.ShorthandLookup(name[:1]) - if flag == nil { - return false - } - return flag.NoOptDefVal != "" -} - -func stripFlags(args []string, c *Command) []string { - if len(args) == 0 { - return args - } - c.mergePersistentFlags() - - commands := []string{} - flags := c.Flags() - -Loop: - for len(args) > 0 { - s := args[0] - args = args[1:] - switch { - case s == "--": - // "--" terminates the flags - break Loop - case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): - // If '--flag arg' then - // delete arg from args. - fallthrough // (do the same as below) - case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): - // If '-f arg' then - // delete 'arg' from args or break the loop if len(args) <= 1. - if len(args) <= 1 { - break Loop - } else { - args = args[1:] - continue - } - case s != "" && !strings.HasPrefix(s, "-"): - commands = append(commands, s) - } - } - - return commands -} - -// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like -// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). -// Special care needs to be taken not to remove a flag value. -func (c *Command) argsMinusFirstX(args []string, x string) []string { - if len(args) == 0 { - return args - } - c.mergePersistentFlags() - flags := c.Flags() - -Loop: - for pos := 0; pos < len(args); pos++ { - s := args[pos] - switch { - case s == "--": - // -- means we have reached the end of the parseable args. Break out of the loop now. - break Loop - case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): - fallthrough - case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): - // This is a flag without a default value, and an equal sign is not used. Increment pos in order to skip - // over the next arg, because that is the value of this flag. - pos++ - continue - case !strings.HasPrefix(s, "-"): - // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, - // return the args, excluding the one at this position. - if s == x { - ret := []string{} - ret = append(ret, args[:pos]...) - ret = append(ret, args[pos+1:]...) - return ret - } - } - } - return args -} - -func isFlagArg(arg string) bool { - return ((len(arg) >= 3 && arg[0:2] == "--") || - (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) -} - -// Find the target command given the args and command tree -// Meant to be run on the highest node. Only searches down. 
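A sketch combining the version and error-prefix customization points above; a non-empty Version is what triggers the automatic --version/-v flag (see InitDefaultVersionFlag later in this file). Names and version string are illustrative:

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{
		Use:     "myapp",
		Version: "1.4.0", // non-empty Version auto-registers --version (and -v if free)
		Run:     func(cmd *cobra.Command, args []string) {},
	}
	root.SetVersionTemplate("{{.Name}} {{.Version}} (community build)\n")
	root.SetErrPrefix("myapp error:") // replaces the default "Error:" prefix
	cobra.CheckErr(root.Execute())
}
```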
-func (c *Command) Find(args []string) (*Command, []string, error) { - var innerfind func(*Command, []string) (*Command, []string) - - innerfind = func(c *Command, innerArgs []string) (*Command, []string) { - argsWOflags := stripFlags(innerArgs, c) - if len(argsWOflags) == 0 { - return c, innerArgs - } - nextSubCmd := argsWOflags[0] - - cmd := c.findNext(nextSubCmd) - if cmd != nil { - return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd)) - } - return c, innerArgs - } - - commandFound, a := innerfind(c, args) - if commandFound.Args == nil { - return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound)) - } - return commandFound, a, nil -} - -func (c *Command) findSuggestions(arg string) string { - if c.DisableSuggestions { - return "" - } - if c.SuggestionsMinimumDistance <= 0 { - c.SuggestionsMinimumDistance = 2 - } - suggestionsString := "" - if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" - for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) - } - } - return suggestionsString -} - -func (c *Command) findNext(next string) *Command { - matches := make([]*Command, 0) - for _, cmd := range c.commands { - if commandNameMatches(cmd.Name(), next) || cmd.HasAlias(next) { - cmd.commandCalledAs.name = next - return cmd - } - if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) { - matches = append(matches, cmd) - } - } - - if len(matches) == 1 { - // Temporarily disable gosec G602, which produces a false positive. - // See https://github.com/securego/gosec/issues/1005. - return matches[0] // #nosec G602 - } - - return nil -} - -// Traverse the command tree to find the command, and parse args for -// each parent. -func (c *Command) Traverse(args []string) (*Command, []string, error) { - flags := []string{} - inFlag := false - - for i, arg := range args { - switch { - // A long flag with a space separated value - case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="): - // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' - inFlag = !hasNoOptDefVal(arg[2:], c.Flags()) - flags = append(flags, arg) - continue - // A short flag with a space separated value - case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()): - inFlag = true - flags = append(flags, arg) - continue - // The value for a flag - case inFlag: - inFlag = false - flags = append(flags, arg) - continue - // A flag without a value, or with an `=` separated value - case isFlagArg(arg): - flags = append(flags, arg) - continue - } - - cmd := c.findNext(arg) - if cmd == nil { - return c, args, nil - } - - if err := c.ParseFlags(flags); err != nil { - return nil, args, err - } - return cmd.Traverse(args[i+1:]) - } - return c, args, nil -} - -// SuggestionsFor provides suggestions for the typedName. 
-func (c *Command) SuggestionsFor(typedName string) []string { - suggestions := []string{} - for _, cmd := range c.commands { - if cmd.IsAvailableCommand() { - levenshteinDistance := ld(typedName, cmd.Name(), true) - suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance - suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) - if suggestByLevenshtein || suggestByPrefix { - suggestions = append(suggestions, cmd.Name()) - } - for _, explicitSuggestion := range cmd.SuggestFor { - if strings.EqualFold(typedName, explicitSuggestion) { - suggestions = append(suggestions, cmd.Name()) - } - } - } - } - return suggestions -} - -// VisitParents visits all parents of the command and invokes fn on each parent. -func (c *Command) VisitParents(fn func(*Command)) { - if c.HasParent() { - fn(c.Parent()) - c.Parent().VisitParents(fn) - } -} - -// Root finds root command. -func (c *Command) Root() *Command { - if c.HasParent() { - return c.Parent().Root() - } - return c -} - -// ArgsLenAtDash will return the length of c.Flags().Args at the moment -// when a -- was found during args parsing. -func (c *Command) ArgsLenAtDash() int { - return c.Flags().ArgsLenAtDash() -} - -func (c *Command) execute(a []string) (err error) { - if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") - } - - if len(c.Deprecated) > 0 { - c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) - } - - // initialize help and version flag at the last point possible to allow for user - // overriding - c.InitDefaultHelpFlag() - c.InitDefaultVersionFlag() - - err = c.ParseFlags(a) - if err != nil { - return c.FlagErrorFunc()(c, err) - } - - // If help is called, regardless of other flags, return we want help. - // Also say we need help if the command isn't runnable. - helpVal, err := c.Flags().GetBool("help") - if err != nil { - // should be impossible to get here as we always declare a help - // flag in InitDefaultHelpFlag() - c.Println("\"help\" flag declared as non-bool. Please correct your code") - return err - } - - if helpVal { - return flag.ErrHelp - } - - // for back-compat, only add version flag behavior if version is defined - if c.Version != "" { - versionVal, err := c.Flags().GetBool("version") - if err != nil { - c.Println("\"version\" flag declared as non-bool. Please correct your code") - return err - } - if versionVal { - err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) - if err != nil { - c.Println(err) - } - return err - } - } - - if !c.Runnable() { - return flag.ErrHelp - } - - c.preRun() - - defer c.postRun() - - argWoFlags := c.Flags().Args() - if c.DisableFlagParsing { - argWoFlags = a - } - - if err := c.ValidateArgs(argWoFlags); err != nil { - return err - } - - parents := make([]*Command, 0, 5) - for p := c; p != nil; p = p.Parent() { - if EnableTraverseRunHooks { - // When EnableTraverseRunHooks is set: - // - Execute all persistent pre-runs from the root parent till this command. - // - Execute all persistent post-runs from this command till the root parent. - parents = append([]*Command{p}, parents...) - } else { - // Otherwise, execute only the first found persistent hook. 
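SuggestionsFor feeds the "Did you mean this?" output: a command is suggested when its Levenshtein distance to the typed name is within SuggestionsMinimumDistance (default 2), when it is a case-insensitive prefix match, or when the typed name appears in its SuggestFor list. A sketch (names hypothetical):

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{Use: "myapp"}
	root.AddCommand(&cobra.Command{
		Use:        "delete",
		SuggestFor: []string{"remove", "rm"}, // explicit suggestions beyond edit distance
		Run:        func(cmd *cobra.Command, args []string) {},
	})
	root.SetArgs([]string{"delte"})
	// Fails with: unknown command "delte" for "myapp"
	// Did you mean this?
	//         delete        (Levenshtein distance 1 <= default minimum of 2)
	_ = root.Execute()
}
```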
- parents = append(parents, p) - } - } - for _, p := range parents { - if p.PersistentPreRunE != nil { - if err := p.PersistentPreRunE(c, argWoFlags); err != nil { - return err - } - if !EnableTraverseRunHooks { - break - } - } else if p.PersistentPreRun != nil { - p.PersistentPreRun(c, argWoFlags) - if !EnableTraverseRunHooks { - break - } - } - } - if c.PreRunE != nil { - if err := c.PreRunE(c, argWoFlags); err != nil { - return err - } - } else if c.PreRun != nil { - c.PreRun(c, argWoFlags) - } - - if err := c.ValidateRequiredFlags(); err != nil { - return err - } - if err := c.ValidateFlagGroups(); err != nil { - return err - } - - if c.RunE != nil { - if err := c.RunE(c, argWoFlags); err != nil { - return err - } - } else { - c.Run(c, argWoFlags) - } - if c.PostRunE != nil { - if err := c.PostRunE(c, argWoFlags); err != nil { - return err - } - } else if c.PostRun != nil { - c.PostRun(c, argWoFlags) - } - for p := c; p != nil; p = p.Parent() { - if p.PersistentPostRunE != nil { - if err := p.PersistentPostRunE(c, argWoFlags); err != nil { - return err - } - if !EnableTraverseRunHooks { - break - } - } else if p.PersistentPostRun != nil { - p.PersistentPostRun(c, argWoFlags) - if !EnableTraverseRunHooks { - break - } - } - } - - return nil -} - -func (c *Command) preRun() { - for _, x := range initializers { - x() - } -} - -func (c *Command) postRun() { - for _, x := range finalizers { - x() - } -} - -// ExecuteContext is the same as Execute(), but sets the ctx on the command. -// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs -// functions. -func (c *Command) ExecuteContext(ctx context.Context) error { - c.ctx = ctx - return c.Execute() -} - -// Execute uses the args (os.Args[1:] by default) -// and run through the command tree finding appropriate matches -// for commands and then corresponding flags. -func (c *Command) Execute() error { - _, err := c.ExecuteC() - return err -} - -// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command. -// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs -// functions. -func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) { - c.ctx = ctx - return c.ExecuteC() -} - -// ExecuteC executes the command. 
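ExecuteContext is the usual entry point for cancelable commands; pairing it with signal.NotifyContext from the standard library gives Ctrl-C handling, as in this sketch (the `serve` command is hypothetical):

```go
package main

import (
	"context"
	"os"
	"os/signal"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use: "serve",
		RunE: func(cmd *cobra.Command, args []string) error {
			<-cmd.Context().Done() // blocks until Ctrl-C cancels the context
			return cmd.Context().Err()
		},
	}
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()
	_ = root.ExecuteContext(ctx)
}
```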
-func (c *Command) ExecuteC() (cmd *Command, err error) { - if c.ctx == nil { - c.ctx = context.Background() - } - - // Regardless of what command execute is called on, run on Root only - if c.HasParent() { - return c.Root().ExecuteC() - } - - // windows hook - if preExecHookFn != nil { - preExecHookFn(c) - } - - // initialize help at the last point to allow for user overriding - c.InitDefaultHelpCmd() - // initialize completion at the last point to allow for user overriding - c.InitDefaultCompletionCmd() - - // Now that all commands have been created, let's make sure all groups - // are properly created also - c.checkCommandGroups() - - args := c.args - - // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 - if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { - args = os.Args[1:] - } - - // initialize the hidden command to be used for shell completion - c.initCompleteCmd(args) - - var flags []string - if c.TraverseChildren { - cmd, flags, err = c.Traverse(args) - } else { - cmd, flags, err = c.Find(args) - } - if err != nil { - // If found parse to a subcommand and then failed, talk about the subcommand - if cmd != nil { - c = cmd - } - if !c.SilenceErrors { - c.PrintErrln(c.ErrPrefix(), err.Error()) - c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath()) - } - return c, err - } - - cmd.commandCalledAs.called = true - if cmd.commandCalledAs.name == "" { - cmd.commandCalledAs.name = cmd.Name() - } - - // We have to pass global context to children command - // if context is present on the parent command. - if cmd.ctx == nil { - cmd.ctx = c.ctx - } - - err = cmd.execute(flags) - if err != nil { - // Always show help if requested, even if SilenceErrors is in - // effect - if errors.Is(err, flag.ErrHelp) { - cmd.HelpFunc()(cmd, args) - return cmd, nil - } - - // If root command has SilenceErrors flagged, - // all subcommands should respect it - if !cmd.SilenceErrors && !c.SilenceErrors { - c.PrintErrln(cmd.ErrPrefix(), err.Error()) - } - - // If root command has SilenceUsage flagged, - // all subcommands should respect it - if !cmd.SilenceUsage && !c.SilenceUsage { - c.Println(cmd.UsageString()) - } - } - return cmd, err -} - -func (c *Command) ValidateArgs(args []string) error { - if c.Args == nil { - return ArbitraryArgs(c, args) - } - return c.Args(c, args) -} - -// ValidateRequiredFlags validates all required flags are present and returns an error otherwise -func (c *Command) ValidateRequiredFlags() error { - if c.DisableFlagParsing { - return nil - } - - flags := c.Flags() - missingFlagNames := []string{} - flags.VisitAll(func(pflag *flag.Flag) { - requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag] - if !found { - return - } - if (requiredAnnotation[0] == "true") && !pflag.Changed { - missingFlagNames = append(missingFlagNames, pflag.Name) - } - }) - - if len(missingFlagNames) > 0 { - return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`)) - } - return nil -} - -// checkCommandGroups checks if a command has been added to a group that does not exists. -// If so, we panic because it indicates a coding error that should be corrected. 
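ValidateRequiredFlags reads the BashCompOneRequiredFlag annotation, which is normally set through Command.MarkFlagRequired (defined in shell_completions.go, not in this hunk). A sketch, also showing SilenceUsage from the error-handling path above:

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{
		Use:          "myapp",
		SilenceUsage: true, // print the error, but skip the full usage text
		RunE:         func(cmd *cobra.Command, args []string) error { return nil },
	}
	root.Flags().String("region", "", "target region")
	// MarkFlagRequired sets the annotation that ValidateRequiredFlags checks.
	cobra.CheckErr(root.MarkFlagRequired("region"))
	_ = root.Execute() // fails: required flag(s) "region" not set
}
```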
-func (c *Command) checkCommandGroups() { - for _, sub := range c.commands { - // if Group is not defined let the developer know right away - if sub.GroupID != "" && !c.ContainsGroup(sub.GroupID) { - panic(fmt.Sprintf("group id '%s' is not defined for subcommand '%s'", sub.GroupID, sub.CommandPath())) - } - - sub.checkCommandGroups() - } -} - -// InitDefaultHelpFlag adds default help flag to c. -// It is called automatically by executing the c or by calling help and usage. -// If c already has help flag, it will do nothing. -func (c *Command) InitDefaultHelpFlag() { - c.mergePersistentFlags() - if c.Flags().Lookup("help") == nil { - usage := "help for " - if c.Name() == "" { - usage += "this command" - } else { - usage += c.Name() - } - c.Flags().BoolP("help", "h", false, usage) - _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) - } -} - -// InitDefaultVersionFlag adds default version flag to c. -// It is called automatically by executing the c. -// If c already has a version flag, it will do nothing. -// If c.Version is empty, it will do nothing. -func (c *Command) InitDefaultVersionFlag() { - if c.Version == "" { - return - } - - c.mergePersistentFlags() - if c.Flags().Lookup("version") == nil { - usage := "version for " - if c.Name() == "" { - usage += "this command" - } else { - usage += c.Name() - } - if c.Flags().ShorthandLookup("v") == nil { - c.Flags().BoolP("version", "v", false, usage) - } else { - c.Flags().Bool("version", false, usage) - } - _ = c.Flags().SetAnnotation("version", FlagSetByCobraAnnotation, []string{"true"}) - } -} - -// InitDefaultHelpCmd adds default help command to c. -// It is called automatically by executing the c or by calling help and usage. -// If c already has help command or c has no subcommands, it will do nothing. -func (c *Command) InitDefaultHelpCmd() { - if !c.HasSubCommands() { - return - } - - if c.helpCommand == nil { - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. -Simply type ` + c.Name() + ` help [path to command] for full details.`, - ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - var completions []string - cmd, _, e := c.Root().Find(args) - if e != nil { - return nil, ShellCompDirectiveNoFileComp - } - if cmd == nil { - // Root help command. - cmd = c.Root() - } - for _, subCmd := range cmd.Commands() { - if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand { - if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) - } - } - } - return completions, ShellCompDirectiveNoFileComp - }, - Run: func(c *Command, args []string) { - cmd, _, e := c.Root().Find(args) - if cmd == nil || e != nil { - c.Printf("Unknown help topic %#q\n", args) - CheckErr(c.Root().Usage()) - } else { - cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown - cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown - CheckErr(cmd.Help()) - } - }, - GroupID: c.helpCommandGroupID, - } - } - c.RemoveCommand(c.helpCommand) - c.AddCommand(c.helpCommand) -} - -// ResetCommands delete parent, subcommand and help command from c. -func (c *Command) ResetCommands() { - c.parent = nil - c.commands = nil - c.helpCommand = nil - c.parentsPflags = nil -} - -// Sorts commands by their names. 
-type commandSorterByName []*Command - -func (c commandSorterByName) Len() int { return len(c) } -func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] } -func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() } - -// Commands returns a sorted slice of child commands. -func (c *Command) Commands() []*Command { - // do not sort commands if it already sorted or sorting was disabled - if EnableCommandSorting && !c.commandsAreSorted { - sort.Sort(commandSorterByName(c.commands)) - c.commandsAreSorted = true - } - return c.commands -} - -// AddCommand adds one or more commands to this parent command. -func (c *Command) AddCommand(cmds ...*Command) { - for i, x := range cmds { - if cmds[i] == c { - panic("Command can't be a child of itself") - } - cmds[i].parent = c - // update max lengths - usageLen := len(x.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(x.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(x.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - // If global normalization function exists, update all children - if c.globNormFunc != nil { - x.SetGlobalNormalizationFunc(c.globNormFunc) - } - c.commands = append(c.commands, x) - c.commandsAreSorted = false - } -} - -// Groups returns a slice of child command groups. -func (c *Command) Groups() []*Group { - return c.commandgroups -} - -// AllChildCommandsHaveGroup returns if all subcommands are assigned to a group -func (c *Command) AllChildCommandsHaveGroup() bool { - for _, sub := range c.commands { - if (sub.IsAvailableCommand() || sub == c.helpCommand) && sub.GroupID == "" { - return false - } - } - return true -} - -// ContainsGroup return if groupID exists in the list of command groups. -func (c *Command) ContainsGroup(groupID string) bool { - for _, x := range c.commandgroups { - if x.ID == groupID { - return true - } - } - return false -} - -// AddGroup adds one or more command groups to this parent command. -func (c *Command) AddGroup(groups ...*Group) { - c.commandgroups = append(c.commandgroups, groups...) -} - -// RemoveCommand removes one or more commands from a parent command. -func (c *Command) RemoveCommand(cmds ...*Command) { - commands := []*Command{} -main: - for _, command := range c.commands { - for _, cmd := range cmds { - if command == cmd { - command.parent = nil - continue main - } - } - commands = append(commands, command) - } - c.commands = commands - // recompute all lengths - c.commandsMaxUseLen = 0 - c.commandsMaxCommandPathLen = 0 - c.commandsMaxNameLen = 0 - for _, command := range c.commands { - usageLen := len(command.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(command.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(command.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - } -} - -// Print is a convenience method to Print to the defined output, fallback to Stderr if not set. -func (c *Command) Print(i ...interface{}) { - fmt.Fprint(c.OutOrStderr(), i...) -} - -// Println is a convenience method to Println to the defined output, fallback to Stderr if not set. 
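Using the group machinery above: groups must be registered with AddGroup before a subcommand references them by GroupID, since checkCommandGroups panics on unknown IDs during Execute. A sketch (IDs and titles hypothetical):

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{Use: "myapp"}
	// Register the group first; a GroupID without a matching Group panics.
	root.AddGroup(&cobra.Group{ID: "mgmt", Title: "Management Commands:"})
	root.AddCommand(&cobra.Command{
		Use:     "prune",
		GroupID: "mgmt",
		Run:     func(cmd *cobra.Command, args []string) {},
	})
	root.SetHelpCommandGroupID("mgmt") // file the built-in help command under the same group
	cobra.CheckErr(root.Execute())
}
```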
-func (c *Command) Println(i ...interface{}) { - c.Print(fmt.Sprintln(i...)) -} - -// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. -func (c *Command) Printf(format string, i ...interface{}) { - c.Print(fmt.Sprintf(format, i...)) -} - -// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErr(i ...interface{}) { - fmt.Fprint(c.ErrOrStderr(), i...) -} - -// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErrln(i ...interface{}) { - c.PrintErr(fmt.Sprintln(i...)) -} - -// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErrf(format string, i ...interface{}) { - c.PrintErr(fmt.Sprintf(format, i...)) -} - -// CommandPath returns the full path to this command. -func (c *Command) CommandPath() string { - if c.HasParent() { - return c.Parent().CommandPath() + " " + c.Name() - } - if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { - return displayName - } - return c.Name() -} - -// UseLine puts out the full usage for a given command (including parents). -func (c *Command) UseLine() string { - var useline string - if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use - } else { - useline = c.Use - } - if c.DisableFlagsInUseLine { - return useline - } - if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") { - useline += " [flags]" - } - return useline -} - -// DebugFlags used to determine which flags have been assigned to which commands -// and which persist. -// nolint:goconst -func (c *Command) DebugFlags() { - c.Println("DebugFlags called on", c.Name()) - var debugflags func(*Command) - - debugflags = func(x *Command) { - if x.HasFlags() || x.HasPersistentFlags() { - c.Println(x.Name()) - } - if x.HasFlags() { - x.flags.VisitAll(func(f *flag.Flag) { - if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") - } - }) - } - if x.HasPersistentFlags() { - x.pflags.VisitAll(func(f *flag.Flag) { - if x.HasFlags() { - if x.flags.Lookup(f.Name) == nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - }) - } - c.Println(x.flagErrorBuf) - if x.HasSubCommands() { - for _, y := range x.commands { - debugflags(y) - } - } - } - - debugflags(c) -} - -// Name returns the command's name: the first word in the use line. -func (c *Command) Name() string { - name := c.Use - i := strings.Index(name, " ") - if i >= 0 { - name = name[:i] - } - return name -} - -// HasAlias determines if a given string is an alias of the command. -func (c *Command) HasAlias(s string) bool { - for _, a := range c.Aliases { - if commandNameMatches(a, s) { - return true - } - } - return false -} - -// CalledAs returns the command name or alias that was used to invoke -// this command or an empty string if the command has not been called. 
-func (c *Command) CalledAs() string { - if c.commandCalledAs.called { - return c.commandCalledAs.name - } - return "" -} - -// hasNameOrAliasPrefix returns true if the Name or any of aliases start -// with prefix -func (c *Command) hasNameOrAliasPrefix(prefix string) bool { - if strings.HasPrefix(c.Name(), prefix) { - c.commandCalledAs.name = c.Name() - return true - } - for _, alias := range c.Aliases { - if strings.HasPrefix(alias, prefix) { - c.commandCalledAs.name = alias - return true - } - } - return false -} - -// NameAndAliases returns a list of the command name and all aliases -func (c *Command) NameAndAliases() string { - return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") -} - -// HasExample determines if the command has example. -func (c *Command) HasExample() bool { - return len(c.Example) > 0 -} - -// Runnable determines if the command is itself runnable. -func (c *Command) Runnable() bool { - return c.Run != nil || c.RunE != nil -} - -// HasSubCommands determines if the command has children commands. -func (c *Command) HasSubCommands() bool { - return len(c.commands) > 0 -} - -// IsAvailableCommand determines if a command is available as a non-help command -// (this includes all non deprecated/hidden commands). -func (c *Command) IsAvailableCommand() bool { - if len(c.Deprecated) != 0 || c.Hidden { - return false - } - - if c.HasParent() && c.Parent().helpCommand == c { - return false - } - - if c.Runnable() || c.HasAvailableSubCommands() { - return true - } - - return false -} - -// IsAdditionalHelpTopicCommand determines if a command is an additional -// help topic command; additional help topic command is determined by the -// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that -// are runnable/hidden/deprecated. -// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924. -func (c *Command) IsAdditionalHelpTopicCommand() bool { - // if a command is runnable, deprecated, or hidden it is not a 'help' command - if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { - return false - } - - // if any non-help sub commands are found, the command is not a 'help' command - for _, sub := range c.commands { - if !sub.IsAdditionalHelpTopicCommand() { - return false - } - } - - // the command either has no sub commands, or no non-help sub commands - return true -} - -// HasHelpSubCommands determines if a command has any available 'help' sub commands -// that need to be shown in the usage/help default template under 'additional help -// topics'. -func (c *Command) HasHelpSubCommands() bool { - // return true on the first found available 'help' sub command - for _, sub := range c.commands { - if sub.IsAdditionalHelpTopicCommand() { - return true - } - } - - // the command either has no sub commands, or no available 'help' sub commands - return false -} - -// HasAvailableSubCommands determines if a command has available sub commands that -// need to be shown in the usage/help default template under 'available commands'. -func (c *Command) HasAvailableSubCommands() bool { - // return true on the first found available (non deprecated/help/hidden) - // sub command - for _, sub := range c.commands { - if sub.IsAvailableCommand() { - return true - } - } - - // the command either has no sub commands, or no available (non deprecated/help/hidden) - // sub commands - return false -} - -// HasParent determines if the command is a child command. 
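`IsAdditionalHelpTopicCommand()` above is what makes a bare, non-runnable command appear under "Additional help topics" rather than "Available Commands". A hedged sketch (topic name and text invented):

```go
func addHelpTopic(root *cobra.Command) {
	// No Run/RunE and no runnable children, so Runnable() == false and
	// IsAdditionalHelpTopicCommand() == true: the command is listed under
	// "Additional help topics" in the default usage template.
	root.AddCommand(&cobra.Command{
		Use:   "config-format",
		Short: "Layout of the app configuration file",
		Long:  "The configuration file is YAML with the following keys ...",
	})
}
```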
-func (c *Command) HasParent() bool { - return c.parent != nil -} - -// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist. -func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { - return c.globNormFunc -} - -// Flags returns the complete FlagSet that applies -// to this command (local and persistent declared here and by all parents). -func (c *Command) Flags() *flag.FlagSet { - if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.flags.SetOutput(c.flagErrorBuf) - } - - return c.flags -} - -// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. -func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { - persistentFlags := c.PersistentFlags() - - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.LocalFlags().VisitAll(func(f *flag.Flag) { - if persistentFlags.Lookup(f.Name) == nil { - out.AddFlag(f) - } - }) - return out -} - -// LocalFlags returns the local FlagSet specifically set in the current command. -func (c *Command) LocalFlags() *flag.FlagSet { - c.mergePersistentFlags() - - if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.lflags.SetOutput(c.flagErrorBuf) - } - c.lflags.SortFlags = c.Flags().SortFlags - if c.globNormFunc != nil { - c.lflags.SetNormalizeFunc(c.globNormFunc) - } - - addToLocal := func(f *flag.Flag) { - // Add the flag if it is not a parent PFlag, or it shadows a parent PFlag - if c.lflags.Lookup(f.Name) == nil && f != c.parentsPflags.Lookup(f.Name) { - c.lflags.AddFlag(f) - } - } - c.Flags().VisitAll(addToLocal) - c.PersistentFlags().VisitAll(addToLocal) - return c.lflags -} - -// InheritedFlags returns all flags which were inherited from parent commands. -func (c *Command) InheritedFlags() *flag.FlagSet { - c.mergePersistentFlags() - - if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.iflags.SetOutput(c.flagErrorBuf) - } - - local := c.LocalFlags() - if c.globNormFunc != nil { - c.iflags.SetNormalizeFunc(c.globNormFunc) - } - - c.parentsPflags.VisitAll(func(f *flag.Flag) { - if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { - c.iflags.AddFlag(f) - } - }) - return c.iflags -} - -// NonInheritedFlags returns all flags which were not inherited from parent commands. -func (c *Command) NonInheritedFlags() *flag.FlagSet { - return c.LocalFlags() -} - -// PersistentFlags returns the persistent FlagSet specifically set in the current command. -func (c *Command) PersistentFlags() *flag.FlagSet { - if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.pflags.SetOutput(c.flagErrorBuf) - } - return c.pflags -} - -// ResetFlags deletes all flags from command. -func (c *Command) ResetFlags() { - c.flagErrorBuf = new(bytes.Buffer) - c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.pflags.SetOutput(c.flagErrorBuf) - - c.lflags = nil - c.iflags = nil - c.parentsPflags = nil -} - -// HasFlags checks if the command contains any flags (local plus persistent from the entire structure). 
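The accessors deleted above expose four views of one flag space: `Flags()` is the merged parse set, `PersistentFlags()` what children inherit, `LocalFlags()` what is declared here, and `InheritedFlags()` what comes down from parents. A sketch of a parent's persistent flag surfacing on a child (flag names invented):

```go
root := &cobra.Command{Use: "app"}
run := &cobra.Command{Use: "run", Run: func(*cobra.Command, []string) {}}
root.AddCommand(run)

root.PersistentFlags().String("config", "", "path to config file")
run.Flags().Bool("verbose", false, "verbose output")

// ParseFlags merges persistent flags from all parents into run.Flags().
_ = run.ParseFlags([]string{"--config", "app.yaml", "--verbose"})
cfg, _ := run.Flags().GetString("config")           // "app.yaml", inherited
inherited := run.InheritedFlags().Lookup("config")  // non-nil: came from root
local := run.LocalFlags().Lookup("verbose")         // non-nil: declared here
_, _, _ = cfg, inherited, local
```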
-func (c *Command) HasFlags() bool { - return c.Flags().HasFlags() -} - -// HasPersistentFlags checks if the command contains persistent flags. -func (c *Command) HasPersistentFlags() bool { - return c.PersistentFlags().HasFlags() -} - -// HasLocalFlags checks if the command has flags specifically declared locally. -func (c *Command) HasLocalFlags() bool { - return c.LocalFlags().HasFlags() -} - -// HasInheritedFlags checks if the command has flags inherited from its parent command. -func (c *Command) HasInheritedFlags() bool { - return c.InheritedFlags().HasFlags() -} - -// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire -// structure) which are not hidden or deprecated. -func (c *Command) HasAvailableFlags() bool { - return c.Flags().HasAvailableFlags() -} - -// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated. -func (c *Command) HasAvailablePersistentFlags() bool { - return c.PersistentFlags().HasAvailableFlags() -} - -// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden -// or deprecated. -func (c *Command) HasAvailableLocalFlags() bool { - return c.LocalFlags().HasAvailableFlags() -} - -// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are -// not hidden or deprecated. -func (c *Command) HasAvailableInheritedFlags() bool { - return c.InheritedFlags().HasAvailableFlags() -} - -// Flag climbs up the command tree looking for matching flag. -func (c *Command) Flag(name string) (flag *flag.Flag) { - flag = c.Flags().Lookup(name) - - if flag == nil { - flag = c.persistentFlag(name) - } - - return -} - -// Recursively find matching persistent flag. -func (c *Command) persistentFlag(name string) (flag *flag.Flag) { - if c.HasPersistentFlags() { - flag = c.PersistentFlags().Lookup(name) - } - - if flag == nil { - c.updateParentsPflags() - flag = c.parentsPflags.Lookup(name) - } - return -} - -// ParseFlags parses persistent flag tree and local flags. -func (c *Command) ParseFlags(args []string) error { - if c.DisableFlagParsing { - return nil - } - - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - beforeErrorBufLen := c.flagErrorBuf.Len() - c.mergePersistentFlags() - - // do it here after merging all flags and just before parse - c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) - - err := c.Flags().Parse(args) - // Print warnings if they occurred (e.g. deprecated flag messages). - if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil { - c.Print(c.flagErrorBuf.String()) - } - - return err -} - -// Parent returns a commands parent command. -func (c *Command) Parent() *Command { - return c.parent -} - -// mergePersistentFlags merges c.PersistentFlags() to c.Flags() -// and adds missing persistent flags of all parents. -func (c *Command) mergePersistentFlags() { - c.updateParentsPflags() - c.Flags().AddFlagSet(c.PersistentFlags()) - c.Flags().AddFlagSet(c.parentsPflags) -} - -// updateParentsPflags updates c.parentsPflags by adding -// new persistent flags of all parents. -// If c.parentsPflags == nil, it makes new. 
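`ParseFlags()` above copies `FParseErrWhitelist` into pflag's `ParseErrorsWhitelist` immediately before parsing, which is the mechanism that lets a command tolerate flags it never declared. A hedged sketch:

```go
cmd := &cobra.Command{
	Use: "wrap",
	// Tolerate undeclared flags; ParseFlags installs this as the pflag
	// ParseErrorsWhitelist right before Parse is called.
	FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
	Run:                func(cmd *cobra.Command, args []string) {},
}
err := cmd.ParseFlags([]string{"--not-declared=1"})
// err == nil: the unknown flag is whitelisted rather than rejected
_ = err
```

This is the usual escape hatch for wrapper CLIs that forward unrecognized flags to another program.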
-func (c *Command) updateParentsPflags() { - if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.parentsPflags.SetOutput(c.flagErrorBuf) - c.parentsPflags.SortFlags = false - } - - if c.globNormFunc != nil { - c.parentsPflags.SetNormalizeFunc(c.globNormFunc) - } - - c.Root().PersistentFlags().AddFlagSet(flag.CommandLine) - - c.VisitParents(func(parent *Command) { - c.parentsPflags.AddFlagSet(parent.PersistentFlags()) - }) -} - -// commandNameMatches checks if two command names are equal -// taking into account case sensitivity according to -// EnableCaseInsensitive global configuration. -func commandNameMatches(s string, t string) bool { - if EnableCaseInsensitive { - return strings.EqualFold(s, t) - } - - return s == t -} diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go deleted file mode 100644 index 307f0c127f..0000000000 --- a/vendor/github.com/spf13/cobra/command_notwin.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows -// +build !windows - -package cobra - -var preExecHookFn func(*Command) diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go deleted file mode 100644 index adbef395c2..0000000000 --- a/vendor/github.com/spf13/cobra/command_win.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
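`commandNameMatches()` just above is the single gate for the `EnableCaseInsensitive` global: when set, command names and aliases compare with `strings.EqualFold` during lookup. A sketch (command name invented):

```go
cobra.EnableCaseInsensitive = true

root := &cobra.Command{Use: "app"}
root.AddCommand(&cobra.Command{
	Use: "status",
	Run: func(cmd *cobra.Command, args []string) { cmd.Println("ok") },
})

root.SetArgs([]string{"STATUS"}) // resolves to "status" via EqualFold
_ = root.Execute()
```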
- -//go:build windows -// +build windows - -package cobra - -import ( - "fmt" - "os" - "time" - - "github.com/inconshreveable/mousetrap" -) - -var preExecHookFn = preExecHook - -func preExecHook(c *Command) { - if MousetrapHelpText != "" && mousetrap.StartedByExplorer() { - c.Print(MousetrapHelpText) - if MousetrapDisplayDuration > 0 { - time.Sleep(MousetrapDisplayDuration) - } else { - c.Println("Press return to continue...") - fmt.Scanln() - } - os.Exit(1) - } -} diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go deleted file mode 100644 index b60f6b2000..0000000000 --- a/vendor/github.com/spf13/cobra/completions.go +++ /dev/null @@ -1,901 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cobra - -import ( - "fmt" - "os" - "strings" - "sync" - - "github.com/spf13/pflag" -) - -const ( - // ShellCompRequestCmd is the name of the hidden command that is used to request - // completion results from the program. It is used by the shell completion scripts. - ShellCompRequestCmd = "__complete" - // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request - // completion results without their description. It is used by the shell completion scripts. - ShellCompNoDescRequestCmd = "__completeNoDesc" -) - -// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it. -var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} - -// lock for reading and writing from flagCompletionFunctions -var flagCompletionMutex = &sync.RWMutex{} - -// ShellCompDirective is a bit map representing the different behaviors the shell -// can be instructed to have once completions have been provided. -type ShellCompDirective int - -type flagCompError struct { - subCommand string - flagName string -} - -func (e *flagCompError) Error() string { - return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'" -} - -const ( - // ShellCompDirectiveError indicates an error occurred and completions should be ignored. - ShellCompDirectiveError ShellCompDirective = 1 << iota - - // ShellCompDirectiveNoSpace indicates that the shell should not add a space - // after the completion even if there is a single completion provided. - ShellCompDirectiveNoSpace - - // ShellCompDirectiveNoFileComp indicates that the shell should not provide - // file completion even when no completion is provided. - ShellCompDirectiveNoFileComp - - // ShellCompDirectiveFilterFileExt indicates that the provided completions - // should be used as file extension filters. - // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename() - // is a shortcut to using this directive explicitly. The BashCompFilenameExt - // annotation can also be used to obtain the same behavior for flags. 
- ShellCompDirectiveFilterFileExt - - // ShellCompDirectiveFilterDirs indicates that only directory names should - // be provided in file completion. To request directory names within another - // directory, the returned completions should specify the directory within - // which to search. The BashCompSubdirsInDir annotation can be used to - // obtain the same behavior but only for flags. - ShellCompDirectiveFilterDirs - - // ShellCompDirectiveKeepOrder indicates that the shell should preserve the order - // in which the completions are provided - ShellCompDirectiveKeepOrder - - // =========================================================================== - - // All directives using iota should be above this one. - // For internal use. - shellCompDirectiveMaxValue - - // ShellCompDirectiveDefault indicates to let the shell perform its default - // behavior after completions have been provided. - // This one must be last to avoid messing up the iota count. - ShellCompDirectiveDefault ShellCompDirective = 0 -) - -const ( - // Constants for the completion command - compCmdName = "completion" - compCmdNoDescFlagName = "no-descriptions" - compCmdNoDescFlagDesc = "disable completion descriptions" - compCmdNoDescFlagDefault = false -) - -// CompletionOptions are the options to control shell completion -type CompletionOptions struct { - // DisableDefaultCmd prevents Cobra from creating a default 'completion' command - DisableDefaultCmd bool - // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag - // for shells that support completion descriptions - DisableNoDescFlag bool - // DisableDescriptions turns off all completion descriptions for shells - // that support them - DisableDescriptions bool - // HiddenDefaultCmd makes the default 'completion' command hidden - HiddenDefaultCmd bool -} - -// NoFileCompletions can be used to disable file completion for commands that should -// not trigger file completions. -func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - return nil, ShellCompDirectiveNoFileComp -} - -// FixedCompletions can be used to create a completion function which always -// returns the same results. -func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - return choices, directive - } -} - -// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. -func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { - flag := c.Flag(flagName) - if flag == nil { - return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) - } - flagCompletionMutex.Lock() - defer flagCompletionMutex.Unlock() - - if _, exists := flagCompletionFunctions[flag]; exists { - return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName) - } - flagCompletionFunctions[flag] = f - return nil -} - -// GetFlagCompletionFunc returns the completion function for the given flag of the command, if available. 
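`RegisterFlagCompletionFunc` above is the per-flag entry point into this directive machinery: the callback returns candidate strings (optionally `"value\tdescription"`) plus a `ShellCompDirective` bit mask, and registration fails if the flag is missing or already registered. A sketch with invented flag values:

```go
cmd.Flags().String("env", "", "deployment environment")

_ = cmd.RegisterFlagCompletionFunc("env",
	func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		// "value\tdescription" pairs; NoFileComp suppresses the shell's
		// fallback to filename completion.
		return []string{"dev\tdevelopment", "prod\tproduction"},
			cobra.ShellCompDirectiveNoFileComp
	})
```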
-func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) { - flag := c.Flag(flagName) - if flag == nil { - return nil, false - } - - flagCompletionMutex.RLock() - defer flagCompletionMutex.RUnlock() - - completionFunc, exists := flagCompletionFunctions[flag] - return completionFunc, exists -} - -// Returns a string listing the different directive enabled in the specified parameter -func (d ShellCompDirective) string() string { - var directives []string - if d&ShellCompDirectiveError != 0 { - directives = append(directives, "ShellCompDirectiveError") - } - if d&ShellCompDirectiveNoSpace != 0 { - directives = append(directives, "ShellCompDirectiveNoSpace") - } - if d&ShellCompDirectiveNoFileComp != 0 { - directives = append(directives, "ShellCompDirectiveNoFileComp") - } - if d&ShellCompDirectiveFilterFileExt != 0 { - directives = append(directives, "ShellCompDirectiveFilterFileExt") - } - if d&ShellCompDirectiveFilterDirs != 0 { - directives = append(directives, "ShellCompDirectiveFilterDirs") - } - if d&ShellCompDirectiveKeepOrder != 0 { - directives = append(directives, "ShellCompDirectiveKeepOrder") - } - if len(directives) == 0 { - directives = append(directives, "ShellCompDirectiveDefault") - } - - if d >= shellCompDirectiveMaxValue { - return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d) - } - return strings.Join(directives, ", ") -} - -// initCompleteCmd adds a special hidden command that can be used to request custom completions. -func (c *Command) initCompleteCmd(args []string) { - completeCmd := &Command{ - Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), - Aliases: []string{ShellCompNoDescRequestCmd}, - DisableFlagsInUseLine: true, - Hidden: true, - DisableFlagParsing: true, - Args: MinimumNArgs(1), - Short: "Request shell completion choices for the specified command-line", - Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s", - "to request completion choices for the specified command-line.", ShellCompRequestCmd), - Run: func(cmd *Command, args []string) { - finalCmd, completions, directive, err := cmd.getCompletions(args) - if err != nil { - CompErrorln(err.Error()) - // Keep going for multiple reasons: - // 1- There could be some valid completions even though there was an error - // 2- Even without completions, we need to print the directive - } - - noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) - for _, comp := range completions { - if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable { - // Remove all activeHelp entries in this case - if strings.HasPrefix(comp, activeHelpMarker) { - continue - } - } - if noDescriptions { - // Remove any description that may be included following a tab character. - comp = strings.Split(comp, "\t")[0] - } - - // Make sure we only write the first line to the output. - // This is needed if a description contains a linebreak. - // Otherwise the shell scripts will interpret the other lines as new flags - // and could therefore provide a wrong completion. - comp = strings.Split(comp, "\n")[0] - - // Finally trim the completion. This is especially important to get rid - // of a trailing tab when there are no description following it. - // For example, a sub-command without a description should not be completed - // with a tab at the end (or else zsh will show a -- following it - // although there is no description). 
- comp = strings.TrimSpace(comp) - - // Print each possible completion to stdout for the completion script to consume. - fmt.Fprintln(finalCmd.OutOrStdout(), comp) - } - - // As the last printout, print the completion directive for the completion script to parse. - // The directive integer must be that last character following a single colon (:). - // The completion script expects : - fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) - - // Print some helpful info to stderr for the user to understand. - // Output from stderr must be ignored by the completion script. - fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string()) - }, - } - c.AddCommand(completeCmd) - subCmd, _, err := c.Find(args) - if err != nil || subCmd.Name() != ShellCompRequestCmd { - // Only create this special command if it is actually being called. - // This reduces possible side-effects of creating such a command; - // for example, having this command would cause problems to a - // cobra program that only consists of the root command, since this - // command would cause the root command to suddenly have a subcommand. - c.RemoveCommand(completeCmd) - } -} - -func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { - // The last argument, which is not completely typed by the user, - // should not be part of the list of arguments - toComplete := args[len(args)-1] - trimmedArgs := args[:len(args)-1] - - var finalCmd *Command - var finalArgs []string - var err error - // Find the real command for which completion must be performed - // check if we need to traverse here to parse local flags on parent commands - if c.Root().TraverseChildren { - finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs) - } else { - // For Root commands that don't specify any value for their Args fields, when we call - // Find(), if those Root commands don't have any sub-commands, they will accept arguments. - // However, because we have added the __complete sub-command in the current code path, the - // call to Find() -> legacyArgs() will return an error if there are any arguments. - // To avoid this, we first remove the __complete command to get back to having no sub-commands. - rootCmd := c.Root() - if len(rootCmd.Commands()) == 1 { - rootCmd.RemoveCommand(c) - } - - finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs) - } - if err != nil { - // Unable to find the real command. E.g., someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) - } - finalCmd.ctx = c.ctx - - // These flags are normally added when `execute()` is called on `finalCmd`, - // however, when doing completion, we don't call `finalCmd.execute()`. - // Let's add the --help and --version flag ourselves but only if the finalCmd - // has not disabled flag parsing; if flag parsing is disabled, it is up to the - // finalCmd itself to handle the completion of *all* flags. - if !finalCmd.DisableFlagParsing { - finalCmd.InitDefaultHelpFlag() - finalCmd.InitDefaultVersionFlag() - } - - // Check if we are doing flag value completion before parsing the flags. - // This is important because if we are completing a flag value, we need to also - // remove the flag name argument from the list of finalArgs or else the parsing - // could fail due to an invalid value (incomplete) for the flag. 
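The hidden `__complete` command whose Run body appears above emits one candidate per line and finishes with `:<directive>`; anything on stderr is ignored by the shell scripts. One way to observe the protocol without a shell is to drive the command directly, as in this test-style sketch (`app`/`deploy` invented; assumes `bytes` and `fmt` imports):

```go
root := &cobra.Command{Use: "app"}
root.AddCommand(&cobra.Command{
	Use:   "deploy",
	Short: "Deploy the project",
	Run:   func(*cobra.Command, []string) {},
})

buf := new(bytes.Buffer)
root.SetOut(buf)
root.SetArgs([]string{cobra.ShellCompRequestCmd, "dep"}) // "__complete dep"
_ = root.Execute()
fmt.Print(buf.String())
// deploy	Deploy the project
// :4        (4 == ShellCompDirectiveNoFileComp)
```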
- flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) - - // Check if interspersed is false or -- was set on a previous arg. - // This works by counting the arguments. Normally -- is not counted as arg but - // if -- was already set or interspersed is false and there is already one arg then - // the extra added -- is counted as arg. - flagCompletion := true - _ = finalCmd.ParseFlags(append(finalArgs, "--")) - newArgCount := finalCmd.Flags().NArg() - - // Parse the flags early so we can check if required flags are set - if err = finalCmd.ParseFlags(finalArgs); err != nil { - return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) - } - - realArgCount := finalCmd.Flags().NArg() - if newArgCount > realArgCount { - // don't do flag completion (see above) - flagCompletion = false - } - // Error while attempting to parse flags - if flagErr != nil { - // If error type is flagCompError and we don't want flagCompletion we should ignore the error - if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { - return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr - } - } - - // Look for the --help or --version flags. If they are present, - // there should be no further completions. - if helpOrVersionFlagPresent(finalCmd) { - return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil - } - - // We only remove the flags from the arguments if DisableFlagParsing is not set. - // This is important for commands which have requested to do their own flag completion. - if !finalCmd.DisableFlagParsing { - finalArgs = finalCmd.Flags().Args() - } - - if flag != nil && flagCompletion { - // Check if we are completing a flag value subject to annotations - if validExts, present := flag.Annotations[BashCompFilenameExt]; present { - if len(validExts) != 0 { - // File completion filtered by extensions - return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil - } - - // The annotation requests simple file completion. There is no reason to do - // that since it is the default behavior anyway. Let's ignore this annotation - // in case the program also registered a completion function for this flag. - // Even though it is a mistake on the program's side, let's be nice when we can. - } - - if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present { - if len(subDir) == 1 { - // Directory completion from within a directory - return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil - } - // Directory completion - return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil - } - } - - var completions []string - var directive ShellCompDirective - - // Enforce flag groups before doing flag completions - finalCmd.enforceFlagGroupsForCompletion() - - // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true; - // doing this allows for completion of persistent flag names even for commands that disable flag parsing. - // - // When doing completion of a flag name, as soon as an argument starts with - // a '-' we know it is a flag. 
We cannot use isFlagArg() here as it requires - // the flag name to be complete - if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion { - // First check for required flags - completions = completeRequireFlags(finalCmd, toComplete) - - // If we have not found any required flags, only then can we show regular flags - if len(completions) == 0 { - doCompleteFlags := func(flag *pflag.Flag) { - if !flag.Changed || - strings.Contains(flag.Value.Type(), "Slice") || - strings.Contains(flag.Value.Type(), "Array") { - // If the flag is not already present, or if it can be specified multiple times (Array or Slice) - // we suggest it as a completion - completions = append(completions, getFlagNameCompletions(flag, toComplete)...) - } - } - - // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands - // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and - // non-inherited flags. - finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteFlags(flag) - }) - // Try to complete non-inherited flags even if DisableFlagParsing==true. - // This allows programs to tell Cobra about flags for completion even - // if the actual parsing of flags is not done by Cobra. - // For instance, Helm uses this to provide flag name completion for - // some of its plugins. - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteFlags(flag) - }) - } - - directive = ShellCompDirectiveNoFileComp - if len(completions) == 1 && strings.HasSuffix(completions[0], "=") { - // If there is a single completion, the shell usually adds a space - // after the completion. We don't want that if the flag ends with an = - directive = ShellCompDirectiveNoSpace - } - - if !finalCmd.DisableFlagParsing { - // If DisableFlagParsing==false, we have completed the flags as known by Cobra; - // we can return what we found. - // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we - // let the logic continue to see if ValidArgsFunction needs to be called. - return finalCmd, completions, directive, nil - } - } else { - directive = ShellCompDirectiveDefault - if flag == nil { - foundLocalNonPersistentFlag := false - // If TraverseChildren is true on the root command we don't check for - // local flags because we can use a local flag on a parent command - if !finalCmd.Root().TraverseChildren { - // Check if there are any local, non-persistent flags on the command-line - localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { - foundLocalNonPersistentFlag = true - } - }) - } - - // Complete subcommand names, including the help command - if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { - // We only complete sub-commands if: - // - there are no arguments on the command-line and - // - there are no local, non-persistent flags on the command-line or TraverseChildren is true - for _, subCmd := range finalCmd.Commands() { - if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { - if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) - } - directive = ShellCompDirectiveNoFileComp - } - } - } - - // Complete required flags even without the '-' prefix - completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) 
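`completeRequireFlags` keys off the `BashCompOneRequiredFlag` annotation, which is what `Command.MarkFlagRequired` sets, so an unset required flag is suggested even before the user types `-`. A sketch (flag name invented):

```go
cmd.Flags().String("namespace", "", "target namespace")
// Sets the BashCompOneRequiredFlag annotation read by completeRequireFlags,
// so --namespace is offered first until it has been provided.
_ = cmd.MarkFlagRequired("namespace")
```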
- - // Always complete ValidArgs, even if we are completing a subcommand name. - // This is for commands that have both subcommands and ValidArgs. - if len(finalCmd.ValidArgs) > 0 { - if len(finalArgs) == 0 { - // ValidArgs are only for the first argument - for _, validArg := range finalCmd.ValidArgs { - if strings.HasPrefix(validArg, toComplete) { - completions = append(completions, validArg) - } - } - directive = ShellCompDirectiveNoFileComp - - // If no completions were found within commands or ValidArgs, - // see if there are any ArgAliases that should be completed. - if len(completions) == 0 { - for _, argAlias := range finalCmd.ArgAliases { - if strings.HasPrefix(argAlias, toComplete) { - completions = append(completions, argAlias) - } - } - } - } - - // If there are ValidArgs specified (even if they don't match), we stop completion. - // Only one of ValidArgs or ValidArgsFunction can be used for a single command. - return finalCmd, completions, directive, nil - } - - // Let the logic continue so as to add any ValidArgsFunction completions, - // even if we already found sub-commands. - // This is for commands that have subcommands but also specify a ValidArgsFunction. - } - } - - // Find the completion function for the flag or command - var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) - if flag != nil && flagCompletion { - flagCompletionMutex.RLock() - completionFn = flagCompletionFunctions[flag] - flagCompletionMutex.RUnlock() - } else { - completionFn = finalCmd.ValidArgsFunction - } - if completionFn != nil { - // Go custom completion defined for this flag or command. - // Call the registered completion function to get the completions. - var comps []string - comps, directive = completionFn(finalCmd, finalArgs, toComplete) - completions = append(completions, comps...) - } - - return finalCmd, completions, directive, nil -} - -func helpOrVersionFlagPresent(cmd *Command) bool { - if versionFlag := cmd.Flags().Lookup("version"); versionFlag != nil && - len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed { - return true - } - if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil && - len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed { - return true - } - return false -} - -func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { - if nonCompletableFlag(flag) { - return []string{} - } - - var completions []string - flagName := "--" + flag.Name - if strings.HasPrefix(flagName, toComplete) { - // Flag without the = - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - - // Why suggest both long forms: --flag and --flag= ? - // This forces the user to *always* have to type either an = or a space after the flag name. - // Let's be nice and avoid making users have to do that. - // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it. - // The = form will still work, we just won't suggest it. - // This also makes the list of suggested flags shorter as we avoid all the = forms. 
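Per the logic above, `ValidArgs` and `ArgAliases` complete only the first positional argument, aliases only as a fallback, and a command should set either `ValidArgs` or `ValidArgsFunction`, not both. A sketch of the dynamic variant (service names invented):

```go
cmd := &cobra.Command{
	Use:  "start [service]",
	Args: cobra.ExactArgs(1),
	Run:  func(cmd *cobra.Command, args []string) {},
}
// Dynamic positional completion; use this *or* ValidArgs, never both.
cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	if len(args) != 0 {
		return nil, cobra.ShellCompDirectiveNoFileComp // only the first arg completes
	}
	return []string{"web", "worker"}, cobra.ShellCompDirectiveNoFileComp
}
```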
- // - // if len(flag.NoOptDefVal) == 0 { - // // Flag requires a value, so it can be suffixed with = - // flagName += "=" - // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - // } - } - - flagName = "-" + flag.Shorthand - if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - } - - return completions -} - -func completeRequireFlags(finalCmd *Command, toComplete string) []string { - var completions []string - - doCompleteRequiredFlags := func(flag *pflag.Flag) { - if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { - if !flag.Changed { - // If the flag is not already present, we suggest it as a completion - completions = append(completions, getFlagNameCompletions(flag, toComplete)...) - } - } - } - - // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands - // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and - // non-inherited flags. - finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteRequiredFlags(flag) - }) - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteRequiredFlags(flag) - }) - - return completions -} - -func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) { - if finalCmd.DisableFlagParsing { - // We only do flag completion if we are allowed to parse flags - // This is important for commands which have requested to do their own flag completion. - return nil, args, lastArg, nil - } - - var flagName string - trimmedArgs := args - flagWithEqual := false - orgLastArg := lastArg - - // When doing completion of a flag name, as soon as an argument starts with - // a '-' we know it is a flag. We cannot use isFlagArg() here as that function - // requires the flag name to be complete - if len(lastArg) > 0 && lastArg[0] == '-' { - if index := strings.Index(lastArg, "="); index >= 0 { - // Flag with an = - if strings.HasPrefix(lastArg[:index], "--") { - // Flag has full name - flagName = lastArg[2:index] - } else { - // Flag is shorthand - // We have to get the last shorthand flag name - // e.g. `-asd` => d to provide the correct completion - // https://github.com/spf13/cobra/issues/1257 - flagName = lastArg[index-1 : index] - } - lastArg = lastArg[index+1:] - flagWithEqual = true - } else { - // Normal flag completion - return nil, args, lastArg, nil - } - } - - if len(flagName) == 0 { - if len(args) > 0 { - prevArg := args[len(args)-1] - if isFlagArg(prevArg) { - // Only consider the case where the flag does not contain an =. - // If the flag contains an = it means it has already been fully processed, - // so we don't need to deal with it here. - if index := strings.Index(prevArg, "="); index < 0 { - if strings.HasPrefix(prevArg, "--") { - // Flag has full name - flagName = prevArg[2:] - } else { - // Flag is shorthand - // We have to get the last shorthand flag name - // e.g. 
`-asd` => d to provide the correct completion - // https://github.com/spf13/cobra/issues/1257 - flagName = prevArg[len(prevArg)-1:] - } - // Remove the uncompleted flag or else there could be an error created - // for an invalid value for that flag - trimmedArgs = args[:len(args)-1] - } - } - } - } - - if len(flagName) == 0 { - // Not doing flag completion - return nil, trimmedArgs, lastArg, nil - } - - flag := findFlag(finalCmd, flagName) - if flag == nil { - // Flag not supported by this command, the interspersed option might be set so return the original args - return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName} - } - - if !flagWithEqual { - if len(flag.NoOptDefVal) != 0 { - // We had assumed dealing with a two-word flag but the flag is a boolean flag. - // In that case, there is no value following it, so we are not really doing flag completion. - // Reset everything to do noun completion. - trimmedArgs = args - flag = nil - } - } - - return flag, trimmedArgs, lastArg, nil -} - -// InitDefaultCompletionCmd adds a default 'completion' command to c. -// This function will do nothing if any of the following is true: -// 1- the feature has been explicitly disabled by the program, -// 2- c has no subcommands (to avoid creating one), -// 3- c already has a 'completion' command provided by the program. -func (c *Command) InitDefaultCompletionCmd() { - if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { - return - } - - for _, cmd := range c.commands { - if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) { - // A completion command is already available - return - } - } - - haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions - - completionCmd := &Command{ - Use: compCmdName, - Short: "Generate the autocompletion script for the specified shell", - Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell. -See each sub-command's help for details on how to use the generated script. -`, c.Root().Name()), - Args: NoArgs, - ValidArgsFunction: NoFileCompletions, - Hidden: c.CompletionOptions.HiddenDefaultCmd, - GroupID: c.completionCommandGroupID, - } - c.AddCommand(completionCmd) - - out := c.OutOrStdout() - noDesc := c.CompletionOptions.DisableDescriptions - shortDesc := "Generate the autocompletion script for %s" - bash := &Command{ - Use: "bash", - Short: fmt.Sprintf(shortDesc, "bash"), - Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell. - -This script depends on the 'bash-completion' package. -If it is not installed already, you can install it via your OS's package manager. - -To load completions in your current shell session: - - source <(%[1]s completion bash) - -To load completions for every new session, execute once: - -#### Linux: - - %[1]s completion bash > /etc/bash_completion.d/%[1]s - -#### macOS: - - %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s - -You will need to start a new shell for this setup to take effect. 
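`InitDefaultCompletionCmd()` above is skipped when the program already ships a `completion` command or sets `CompletionOptions.DisableDefaultCmd`; the remaining options only trim what the generated command exposes. Sketch:

```go
rootCmd.CompletionOptions = cobra.CompletionOptions{
	HiddenDefaultCmd:  true,  // keep "completion" out of --help output
	DisableNoDescFlag: false, // still offer --no-descriptions where supported
}
```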
-`, c.Root().Name()), - Args: NoArgs, - DisableFlagsInUseLine: true, - ValidArgsFunction: NoFileCompletions, - RunE: func(cmd *Command, args []string) error { - return cmd.Root().GenBashCompletionV2(out, !noDesc) - }, - } - if haveNoDescFlag { - bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) - } - - zsh := &Command{ - Use: "zsh", - Short: fmt.Sprintf(shortDesc, "zsh"), - Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell. - -If shell completion is not already enabled in your environment you will need -to enable it. You can execute the following once: - - echo "autoload -U compinit; compinit" >> ~/.zshrc - -To load completions in your current shell session: - - source <(%[1]s completion zsh) - -To load completions for every new session, execute once: - -#### Linux: - - %[1]s completion zsh > "${fpath[1]}/_%[1]s" - -#### macOS: - - %[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s - -You will need to start a new shell for this setup to take effect. -`, c.Root().Name()), - Args: NoArgs, - ValidArgsFunction: NoFileCompletions, - RunE: func(cmd *Command, args []string) error { - if noDesc { - return cmd.Root().GenZshCompletionNoDesc(out) - } - return cmd.Root().GenZshCompletion(out) - }, - } - if haveNoDescFlag { - zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) - } - - fish := &Command{ - Use: "fish", - Short: fmt.Sprintf(shortDesc, "fish"), - Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell. - -To load completions in your current shell session: - - %[1]s completion fish | source - -To load completions for every new session, execute once: - - %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish - -You will need to start a new shell for this setup to take effect. -`, c.Root().Name()), - Args: NoArgs, - ValidArgsFunction: NoFileCompletions, - RunE: func(cmd *Command, args []string) error { - return cmd.Root().GenFishCompletion(out, !noDesc) - }, - } - if haveNoDescFlag { - fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) - } - - powershell := &Command{ - Use: "powershell", - Short: fmt.Sprintf(shortDesc, "powershell"), - Long: fmt.Sprintf(`Generate the autocompletion script for powershell. - -To load completions in your current shell session: - - %[1]s completion powershell | Out-String | Invoke-Expression - -To load completions for every new session, add the output of the above command -to your powershell profile. 
-`, c.Root().Name()), - Args: NoArgs, - ValidArgsFunction: NoFileCompletions, - RunE: func(cmd *Command, args []string) error { - if noDesc { - return cmd.Root().GenPowerShellCompletion(out) - } - return cmd.Root().GenPowerShellCompletionWithDesc(out) - - }, - } - if haveNoDescFlag { - powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) - } - - completionCmd.AddCommand(bash, zsh, fish, powershell) -} - -func findFlag(cmd *Command, name string) *pflag.Flag { - flagSet := cmd.Flags() - if len(name) == 1 { - // First convert the short flag into a long flag - // as the cmd.Flag() search only accepts long flags - if short := flagSet.ShorthandLookup(name); short != nil { - name = short.Name - } else { - set := cmd.InheritedFlags() - if short = set.ShorthandLookup(name); short != nil { - name = short.Name - } else { - return nil - } - } - } - return cmd.Flag(name) -} - -// CompDebug prints the specified string to the same file as where the -// completion script prints its logs. -// Note that completion printouts should never be on stdout as they would -// be wrongly interpreted as actual completion choices by the completion script. -func CompDebug(msg string, printToStdErr bool) { - msg = fmt.Sprintf("[Debug] %s", msg) - - // Such logs are only printed when the user has set the environment - // variable BASH_COMP_DEBUG_FILE to the path of some file to be used. - if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" { - f, err := os.OpenFile(path, - os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err == nil { - defer f.Close() - WriteStringAndCheck(f, msg) - } - } - - if printToStdErr { - // Must print to stderr for this not to be read by the completion script. - fmt.Fprint(os.Stderr, msg) - } -} - -// CompDebugln prints the specified string with a newline at the end -// to the same file as where the completion script prints its logs. -// Such logs are only printed when the user has set the environment -// variable BASH_COMP_DEBUG_FILE to the path of some file to be used. -func CompDebugln(msg string, printToStdErr bool) { - CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr) -} - -// CompError prints the specified completion message to stderr. -func CompError(msg string) { - msg = fmt.Sprintf("[Error] %s", msg) - CompDebug(msg, true) -} - -// CompErrorln prints the specified completion message to stderr with a newline at the end. -func CompErrorln(msg string) { - CompError(fmt.Sprintf("%s\n", msg)) -} diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go deleted file mode 100644 index b8c15ce885..0000000000 --- a/vendor/github.com/spf13/cobra/doc/man_docs.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
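`CompDebug`/`CompError` above append to the file named by `BASH_COMP_DEBUG_FILE` and never touch stdout, which the completion scripts parse, making them the safe way to trace a completion callback. Sketch:

```go
cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	// Appears in $BASH_COMP_DEBUG_FILE when that variable is set; the
	// second argument false keeps the message off stderr as well.
	cobra.CompDebugln("completing toComplete="+toComplete, false)
	return []string{"alpha", "beta"}, cobra.ShellCompDirectiveNoFileComp
}
```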
- -package doc - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - "github.com/cpuguy83/go-md2man/v2/md2man" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -// GenManTree will generate a man page for this command and all descendants -// in the directory given. The header may be nil. This function may not work -// correctly if your command names have `-` in them. If you have `cmd` with two -// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third` -// it is undefined which help output will be in the file `cmd-sub-third.1`. -func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error { - return GenManTreeFromOpts(cmd, GenManTreeOptions{ - Header: header, - Path: dir, - CommandSeparator: "-", - }) -} - -// GenManTreeFromOpts generates a man page for the command and all descendants. -// The pages are written to the opts.Path directory. -func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error { - header := opts.Header - if header == nil { - header = &GenManHeader{} - } - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - if err := GenManTreeFromOpts(c, opts); err != nil { - return err - } - } - section := "1" - if header.Section != "" { - section = header.Section - } - - separator := "_" - if opts.CommandSeparator != "" { - separator = opts.CommandSeparator - } - basename := strings.ReplaceAll(cmd.CommandPath(), " ", separator) - filename := filepath.Join(opts.Path, basename+"."+section) - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - - headerCopy := *header - return GenMan(cmd, &headerCopy, f) -} - -// GenManTreeOptions is the options for generating the man pages. -// Used only in GenManTreeFromOpts. -type GenManTreeOptions struct { - Header *GenManHeader - Path string - CommandSeparator string -} - -// GenManHeader is a lot like the .TH header at the start of man pages. These -// include the title, section, date, source, and manual. We will use the -// current time if Date is unset and will use "Auto generated by spf13/cobra" -// if the Source is unset. -type GenManHeader struct { - Title string - Section string - Date *time.Time - date string - Source string - Manual string -} - -// GenMan will generate a man page for the given command and write it to -// w. The header argument may be nil, however obviously w may not. 
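`GenManTree` above writes one section-1 page per command, joining command-path segments with `-` (and, as its doc comment warns, that naming scheme is ambiguous when command names themselves contain `-`). A sketch with invented paths and version string, assuming the canonical `github.com/spf13/cobra/doc` import:

```go
header := &doc.GenManHeader{
	Title:   "APP",
	Section: "1",
	Manual:  "App Manual",
	Source:  "app 1.0.0", // invented; defaults to "Auto generated by spf13/cobra"
}
if err := doc.GenManTree(rootCmd, header, "./man"); err != nil {
	log.Fatal(err)
}
// Produces ./man/app.1, ./man/app-build.1, ... (separator "-", section "1").
```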
-func GenMan(cmd *cobra.Command, header *GenManHeader, w io.Writer) error { - if header == nil { - header = &GenManHeader{} - } - if err := fillHeader(header, cmd.CommandPath(), cmd.DisableAutoGenTag); err != nil { - return err - } - - b := genMan(cmd, header) - _, err := w.Write(md2man.Render(b)) - return err -} - -func fillHeader(header *GenManHeader, name string, disableAutoGen bool) error { - if header.Title == "" { - header.Title = strings.ToUpper(strings.ReplaceAll(name, " ", "\\-")) - } - if header.Section == "" { - header.Section = "1" - } - if header.Date == nil { - now := time.Now() - if epoch := os.Getenv("SOURCE_DATE_EPOCH"); epoch != "" { - unixEpoch, err := strconv.ParseInt(epoch, 10, 64) - if err != nil { - return fmt.Errorf("invalid SOURCE_DATE_EPOCH: %v", err) - } - now = time.Unix(unixEpoch, 0) - } - header.Date = &now - } - header.date = (*header.Date).Format("Jan 2006") - if header.Source == "" && !disableAutoGen { - header.Source = "Auto generated by spf13/cobra" - } - return nil -} - -func manPreamble(buf io.StringWriter, header *GenManHeader, cmd *cobra.Command, dashedName string) { - description := cmd.Long - if len(description) == 0 { - description = cmd.Short - } - - cobra.WriteStringAndCheck(buf, fmt.Sprintf(`%% "%s" "%s" "%s" "%s" "%s" -# NAME -`, header.Title, header.Section, header.date, header.Source, header.Manual)) - cobra.WriteStringAndCheck(buf, fmt.Sprintf("%s \\- %s\n\n", dashedName, cmd.Short)) - cobra.WriteStringAndCheck(buf, "# SYNOPSIS\n") - cobra.WriteStringAndCheck(buf, fmt.Sprintf("**%s**\n\n", cmd.UseLine())) - cobra.WriteStringAndCheck(buf, "# DESCRIPTION\n") - cobra.WriteStringAndCheck(buf, description+"\n\n") -} - -func manPrintFlags(buf io.StringWriter, flags *pflag.FlagSet) { - flags.VisitAll(func(flag *pflag.Flag) { - if len(flag.Deprecated) > 0 || flag.Hidden { - return - } - format := "" - if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 { - format = fmt.Sprintf("**-%s**, **--%s**", flag.Shorthand, flag.Name) - } else { - format = fmt.Sprintf("**--%s**", flag.Name) - } - if len(flag.NoOptDefVal) > 0 { - format += "[" - } - if flag.Value.Type() == "string" { - // put quotes on the value - format += "=%q" - } else { - format += "=%s" - } - if len(flag.NoOptDefVal) > 0 { - format += "]" - } - format += "\n\t%s\n\n" - cobra.WriteStringAndCheck(buf, fmt.Sprintf(format, flag.DefValue, flag.Usage)) - }) -} - -func manPrintOptions(buf io.StringWriter, command *cobra.Command) { - flags := command.NonInheritedFlags() - if flags.HasAvailableFlags() { - cobra.WriteStringAndCheck(buf, "# OPTIONS\n") - manPrintFlags(buf, flags) - cobra.WriteStringAndCheck(buf, "\n") - } - flags = command.InheritedFlags() - if flags.HasAvailableFlags() { - cobra.WriteStringAndCheck(buf, "# OPTIONS INHERITED FROM PARENT COMMANDS\n") - manPrintFlags(buf, flags) - cobra.WriteStringAndCheck(buf, "\n") - } -} - -func genMan(cmd *cobra.Command, header *GenManHeader) []byte { - cmd.InitDefaultHelpCmd() - cmd.InitDefaultHelpFlag() - - // something like `rootcmd-subcmd1-subcmd2` - dashCommandName := strings.ReplaceAll(cmd.CommandPath(), " ", "-") - - buf := new(bytes.Buffer) - - manPreamble(buf, header, cmd, dashCommandName) - manPrintOptions(buf, cmd) - if len(cmd.Example) > 0 { - buf.WriteString("# EXAMPLE\n") - buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example)) - } - if hasSeeAlso(cmd) { - buf.WriteString("# SEE ALSO\n") - seealsos := make([]string, 0) - if cmd.HasParent() { - parentPath := cmd.Parent().CommandPath() - dashParentPath := 
strings.ReplaceAll(parentPath, " ", "-") - seealso := fmt.Sprintf("**%s(%s)**", dashParentPath, header.Section) - seealsos = append(seealsos, seealso) - cmd.VisitParents(func(c *cobra.Command) { - if c.DisableAutoGenTag { - cmd.DisableAutoGenTag = c.DisableAutoGenTag - } - }) - } - children := cmd.Commands() - sort.Sort(byName(children)) - for _, c := range children { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section) - seealsos = append(seealsos, seealso) - } - buf.WriteString(strings.Join(seealsos, ", ") + "\n") - } - if !cmd.DisableAutoGenTag { - buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006"))) - } - return buf.Bytes() -} diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go deleted file mode 100644 index f98fe2a3b8..0000000000 --- a/vendor/github.com/spf13/cobra/doc/md_docs.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package doc - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/spf13/cobra" -) - -const markdownExtension = ".md" - -func printOptions(buf *bytes.Buffer, cmd *cobra.Command, name string) error { - flags := cmd.NonInheritedFlags() - flags.SetOutput(buf) - if flags.HasAvailableFlags() { - buf.WriteString("### Options\n\n```\n") - flags.PrintDefaults() - buf.WriteString("```\n\n") - } - - parentFlags := cmd.InheritedFlags() - parentFlags.SetOutput(buf) - if parentFlags.HasAvailableFlags() { - buf.WriteString("### Options inherited from parent commands\n\n```\n") - parentFlags.PrintDefaults() - buf.WriteString("```\n\n") - } - return nil -} - -// GenMarkdown creates markdown output. -func GenMarkdown(cmd *cobra.Command, w io.Writer) error { - return GenMarkdownCustom(cmd, w, func(s string) string { return s }) -} - -// GenMarkdownCustom creates custom markdown output. 
-func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { - cmd.InitDefaultHelpCmd() - cmd.InitDefaultHelpFlag() - - buf := new(bytes.Buffer) - name := cmd.CommandPath() - - buf.WriteString("## " + name + "\n\n") - buf.WriteString(cmd.Short + "\n\n") - if len(cmd.Long) > 0 { - buf.WriteString("### Synopsis\n\n") - buf.WriteString(cmd.Long + "\n\n") - } - - if cmd.Runnable() { - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine())) - } - - if len(cmd.Example) > 0 { - buf.WriteString("### Examples\n\n") - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example)) - } - - if err := printOptions(buf, cmd, name); err != nil { - return err - } - if hasSeeAlso(cmd) { - buf.WriteString("### SEE ALSO\n\n") - if cmd.HasParent() { - parent := cmd.Parent() - pname := parent.CommandPath() - link := pname + markdownExtension - link = strings.ReplaceAll(link, " ", "_") - buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short)) - cmd.VisitParents(func(c *cobra.Command) { - if c.DisableAutoGenTag { - cmd.DisableAutoGenTag = c.DisableAutoGenTag - } - }) - } - - children := cmd.Commands() - sort.Sort(byName(children)) - - for _, child := range children { - if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { - continue - } - cname := name + " " + child.Name() - link := cname + markdownExtension - link = strings.ReplaceAll(link, " ", "_") - buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short)) - } - buf.WriteString("\n") - } - if !cmd.DisableAutoGenTag { - buf.WriteString("###### Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "\n") - } - _, err := buf.WriteTo(w) - return err -} - -// GenMarkdownTree will generate a markdown page for this command and all -// descendants in the directory given. -// This function may not work correctly if your command names have `-` in them. -// If you have `cmd` with two subcmds, `sub` and `sub-third`, -// and `sub` has a subcommand called `third`, it is undefined which -// help output will be in the file `cmd-sub-third.md`. -func GenMarkdownTree(cmd *cobra.Command, dir string) error { - identity := func(s string) string { return s } - emptyStr := func(s string) string { return "" } - return GenMarkdownTreeCustom(cmd, dir, emptyStr, identity) -} - -// GenMarkdownTreeCustom is the same as GenMarkdownTree, but -// with custom filePrepender and linkHandler. 
-func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - if err := GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler); err != nil { - return err - } - } - - basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + markdownExtension - filename := filepath.Join(dir, basename) - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - - if _, err := io.WriteString(f, filePrepender(filename)); err != nil { - return err - } - if err := GenMarkdownCustom(cmd, f, linkHandler); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go deleted file mode 100644 index 2cca6fd778..0000000000 --- a/vendor/github.com/spf13/cobra/doc/rest_docs.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package doc - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/spf13/cobra" -) - -func printOptionsReST(buf *bytes.Buffer, cmd *cobra.Command, name string) error { - flags := cmd.NonInheritedFlags() - flags.SetOutput(buf) - if flags.HasAvailableFlags() { - buf.WriteString("Options\n") - buf.WriteString("~~~~~~~\n\n::\n\n") - flags.PrintDefaults() - buf.WriteString("\n") - } - - parentFlags := cmd.InheritedFlags() - parentFlags.SetOutput(buf) - if parentFlags.HasAvailableFlags() { - buf.WriteString("Options inherited from parent commands\n") - buf.WriteString("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n") - parentFlags.PrintDefaults() - buf.WriteString("\n") - } - return nil -} - -// defaultLinkHandler for default ReST hyperlink markup -func defaultLinkHandler(name, ref string) string { - return fmt.Sprintf("`%s <%s.rst>`_", name, ref) -} - -// GenReST creates reStructured Text output. -func GenReST(cmd *cobra.Command, w io.Writer) error { - return GenReSTCustom(cmd, w, defaultLinkHandler) -} - -// GenReSTCustom creates custom reStructured Text output. -func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, string) string) error { - cmd.InitDefaultHelpCmd() - cmd.InitDefaultHelpFlag() - - buf := new(bytes.Buffer) - name := cmd.CommandPath() - - short := cmd.Short - long := cmd.Long - if len(long) == 0 { - long = short - } - ref := strings.ReplaceAll(name, " ", "_") - - buf.WriteString(".. 
_" + ref + ":\n\n") - buf.WriteString(name + "\n") - buf.WriteString(strings.Repeat("-", len(name)) + "\n\n") - buf.WriteString(short + "\n\n") - buf.WriteString("Synopsis\n") - buf.WriteString("~~~~~~~~\n\n") - buf.WriteString("\n" + long + "\n\n") - - if cmd.Runnable() { - buf.WriteString(fmt.Sprintf("::\n\n %s\n\n", cmd.UseLine())) - } - - if len(cmd.Example) > 0 { - buf.WriteString("Examples\n") - buf.WriteString("~~~~~~~~\n\n") - buf.WriteString(fmt.Sprintf("::\n\n%s\n\n", indentString(cmd.Example, " "))) - } - - if err := printOptionsReST(buf, cmd, name); err != nil { - return err - } - if hasSeeAlso(cmd) { - buf.WriteString("SEE ALSO\n") - buf.WriteString("~~~~~~~~\n\n") - if cmd.HasParent() { - parent := cmd.Parent() - pname := parent.CommandPath() - ref = strings.ReplaceAll(pname, " ", "_") - buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(pname, ref), parent.Short)) - cmd.VisitParents(func(c *cobra.Command) { - if c.DisableAutoGenTag { - cmd.DisableAutoGenTag = c.DisableAutoGenTag - } - }) - } - - children := cmd.Commands() - sort.Sort(byName(children)) - - for _, child := range children { - if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { - continue - } - cname := name + " " + child.Name() - ref = strings.ReplaceAll(cname, " ", "_") - buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(cname, ref), child.Short)) - } - buf.WriteString("\n") - } - if !cmd.DisableAutoGenTag { - buf.WriteString("*Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "*\n") - } - _, err := buf.WriteTo(w) - return err -} - -// GenReSTTree will generate a ReST page for this command and all -// descendants in the directory given. -// This function may not work correctly if your command names have `-` in them. -// If you have `cmd` with two subcmds, `sub` and `sub-third`, -// and `sub` has a subcommand called `third`, it is undefined which -// help output will be in the file `cmd-sub-third.1`. -func GenReSTTree(cmd *cobra.Command, dir string) error { - emptyStr := func(s string) string { return "" } - return GenReSTTreeCustom(cmd, dir, emptyStr, defaultLinkHandler) -} - -// GenReSTTreeCustom is the the same as GenReSTTree, but -// with custom filePrepender and linkHandler. -func GenReSTTreeCustom(cmd *cobra.Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - if err := GenReSTTreeCustom(c, dir, filePrepender, linkHandler); err != nil { - return err - } - } - - basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".rst" - filename := filepath.Join(dir, basename) - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - - if _, err := io.WriteString(f, filePrepender(filename)); err != nil { - return err - } - if err := GenReSTCustom(cmd, f, linkHandler); err != nil { - return err - } - return nil -} - -// indentString adapted from: https://github.com/kr/text/blob/main/indent.go -func indentString(s, p string) string { - var res []byte - b := []byte(s) - prefix := []byte(p) - bol := true - for _, c := range b { - if bol && c != '\n' { - res = append(res, prefix...) 
- } - res = append(res, c) - bol = c == '\n' - } - return string(res) -} diff --git a/vendor/github.com/spf13/cobra/doc/util.go b/vendor/github.com/spf13/cobra/doc/util.go deleted file mode 100644 index 0aaa07a166..0000000000 --- a/vendor/github.com/spf13/cobra/doc/util.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package doc - -import ( - "strings" - - "github.com/spf13/cobra" -) - -// Test to see if we have a reason to print See Also information in docs -// Basically this is a test for a parent command or a subcommand which is -// both not deprecated and not the autogenerated help command. -func hasSeeAlso(cmd *cobra.Command) bool { - if cmd.HasParent() { - return true - } - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - return true - } - return false -} - -// Temporary workaround for yaml lib generating incorrect yaml with long strings -// that do not contain \n. -func forceMultiLine(s string) string { - if len(s) > 60 && !strings.Contains(s, "\n") { - s = s + "\n" - } - return s -} - -type byName []*cobra.Command - -func (s byName) Len() int { return len(s) } -func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go deleted file mode 100644 index 2b26d6ec0f..0000000000 --- a/vendor/github.com/spf13/cobra/doc/yaml_docs.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
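Aside: the markdown and ReST generators deleted above share the same recursive tree walk, differing only in the per-file renderer and link format. A sketch of typical usage with a custom link handler, e.g. for a static-site generator that wants extension-less URLs; the command, paths, and front matter below are invented for illustration and are not part of this diff:

```go
package main

import (
	"log"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	rootCmd := &cobra.Command{Use: "ko", Short: "Build and deploy Go applications"}

	// Plain variant: one ko_sub_cmd.md file per command under ./docs.
	if err := doc.GenMarkdownTree(rootCmd, "./docs"); err != nil {
		log.Fatal(err)
	}

	// Custom variant: prepend front matter to every file and rewrite
	// the cross-command links that GenMarkdownCustom emits.
	prepender := func(filename string) string { return "---\nlayout: cli\n---\n\n" }
	linkHandler := func(link string) string { return "/cli/" + strings.TrimSuffix(link, ".md") + "/" }
	if err := doc.GenMarkdownTreeCustom(rootCmd, "./docs", prepender, linkHandler); err != nil {
		log.Fatal(err)
	}
}
```

`GenReSTTree` and `GenReSTTreeCustom` follow the same shape, except that the ReST link handler takes a `(name, ref)` pair instead of a single link string.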
- -package doc - -import ( - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "gopkg.in/yaml.v3" -) - -type cmdOption struct { - Name string - Shorthand string `yaml:",omitempty"` - DefaultValue string `yaml:"default_value,omitempty"` - Usage string `yaml:",omitempty"` -} - -type cmdDoc struct { - Name string - Synopsis string `yaml:",omitempty"` - Description string `yaml:",omitempty"` - Usage string `yaml:",omitempty"` - Options []cmdOption `yaml:",omitempty"` - InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"` - Example string `yaml:",omitempty"` - SeeAlso []string `yaml:"see_also,omitempty"` -} - -// GenYamlTree creates yaml structured ref files for this command and all descendants -// in the directory given. This function may not work -// correctly if your command names have `-` in them. If you have `cmd` with two -// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third`, -// it is undefined which help output will be in the file `cmd-sub-third.yaml`. -func GenYamlTree(cmd *cobra.Command, dir string) error { - identity := func(s string) string { return s } - emptyStr := func(s string) string { return "" } - return GenYamlTreeCustom(cmd, dir, emptyStr, identity) -} - -// GenYamlTreeCustom creates yaml structured ref files. -func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil { - return err - } - } - - basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".yaml" - filename := filepath.Join(dir, basename) - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - - if _, err := io.WriteString(f, filePrepender(filename)); err != nil { - return err - } - if err := GenYamlCustom(cmd, f, linkHandler); err != nil { - return err - } - return nil -} - -// GenYaml creates yaml output. -func GenYaml(cmd *cobra.Command, w io.Writer) error { - return GenYamlCustom(cmd, w, func(s string) string { return s }) -} - -// GenYamlCustom creates custom yaml output. 
-func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { - cmd.InitDefaultHelpCmd() - cmd.InitDefaultHelpFlag() - - yamlDoc := cmdDoc{} - yamlDoc.Name = cmd.CommandPath() - - yamlDoc.Synopsis = forceMultiLine(cmd.Short) - yamlDoc.Description = forceMultiLine(cmd.Long) - - if cmd.Runnable() { - yamlDoc.Usage = cmd.UseLine() - } - - if len(cmd.Example) > 0 { - yamlDoc.Example = cmd.Example - } - - flags := cmd.NonInheritedFlags() - if flags.HasFlags() { - yamlDoc.Options = genFlagResult(flags) - } - flags = cmd.InheritedFlags() - if flags.HasFlags() { - yamlDoc.InheritedOptions = genFlagResult(flags) - } - - if hasSeeAlso(cmd) { - result := []string{} - if cmd.HasParent() { - parent := cmd.Parent() - result = append(result, parent.CommandPath()+" - "+parent.Short) - } - children := cmd.Commands() - sort.Sort(byName(children)) - for _, child := range children { - if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { - continue - } - result = append(result, child.CommandPath()+" - "+child.Short) - } - yamlDoc.SeeAlso = result - } - - final, err := yaml.Marshal(&yamlDoc) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - if _, err := w.Write(final); err != nil { - return err - } - return nil -} - -func genFlagResult(flags *pflag.FlagSet) []cmdOption { - var result []cmdOption - - flags.VisitAll(func(flag *pflag.Flag) { - // TODO: when a shorthand is marked deprecated with an empty message, - // flag.ShorthandDeprecated is empty even though the shorthand is deprecated, - // so the len(flag.ShorthandDeprecated) > 0 check below cannot detect that case. - if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 { - opt := cmdOption{ - flag.Name, - flag.Shorthand, - flag.DefValue, - forceMultiLine(flag.Usage), - } - result = append(result, opt) - } else { - opt := cmdOption{ - Name: flag.Name, - DefaultValue: forceMultiLine(flag.DefValue), - Usage: forceMultiLine(flag.Usage), - } - result = append(result, opt) - } - }) - - return result -} diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go deleted file mode 100644 index 12d61b6911..0000000000 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
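Aside: the YAML generator removed above emits one structured file per command, matching the `cmdDoc` shape (name, synopsis, options, inherited_options, see_also), which makes it convenient machine-readable input for docs pipelines. An illustrative sketch of the two entry points; the command and paths are placeholders:

```go
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	rootCmd := &cobra.Command{Use: "ko", Short: "Build and deploy Go applications"}

	// One .yaml file per command under ./docs/yaml.
	if err := doc.GenYamlTree(rootCmd, "./docs/yaml"); err != nil {
		log.Fatal(err)
	}

	// Or stream a single command's document to any io.Writer.
	if err := doc.GenYaml(rootCmd, os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```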
- -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "strings" -) - -func genFishComp(buf io.StringWriter, name string, includeDesc bool) { - // Variables should not contain a '-' or ':' character - nameForVar := name - nameForVar = strings.ReplaceAll(nameForVar, "-", "_") - nameForVar = strings.ReplaceAll(nameForVar, ":", "_") - - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) - WriteStringAndCheck(buf, fmt.Sprintf(` -function __%[1]s_debug - set -l file "$BASH_COMP_DEBUG_FILE" - if test -n "$file" - echo "$argv" >> $file - end -end - -function __%[1]s_perform_completion - __%[1]s_debug "Starting __%[1]s_perform_completion" - - # Extract all args except the last one - set -l args (commandline -opc) - # Extract the last arg and escape it in case it is a space - set -l lastArg (string escape -- (commandline -ct)) - - __%[1]s_debug "args: $args" - __%[1]s_debug "last arg: $lastArg" - - # Disable ActiveHelp which is not supported for fish shell - set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg" - - __%[1]s_debug "Calling $requestComp" - set -l results (eval $requestComp 2> /dev/null) - - # Some programs may output extra empty lines after the directive. - # Let's ignore them or else it will break completion. - # Ref: https://github.com/spf13/cobra/issues/1279 - for line in $results[-1..1] - if test (string trim -- $line) = "" - # Found an empty line, remove it - set results $results[1..-2] - else - # Found non-empty line, we have our proper output - break - end - end - - set -l comps $results[1..-2] - set -l directiveLine $results[-1] - - # For Fish, when completing a flag with an = (e.g., -n=) - # completions must be prefixed with the flag - set -l flagPrefix (string match -r -- '-.*=' "$lastArg") - - __%[1]s_debug "Comps: $comps" - __%[1]s_debug "DirectiveLine: $directiveLine" - __%[1]s_debug "flagPrefix: $flagPrefix" - - for comp in $comps - printf "%%s%%s\n" "$flagPrefix" "$comp" - end - - printf "%%s\n" "$directiveLine" -end - -# this function limits calls to __%[1]s_perform_completion, by caching the result behind $__%[1]s_perform_completion_once_result -function __%[1]s_perform_completion_once - __%[1]s_debug "Starting __%[1]s_perform_completion_once" - - if test -n "$__%[1]s_perform_completion_once_result" - __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion" - return 0 - end - - set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion) - if test -z "$__%[1]s_perform_completion_once_result" - __%[1]s_debug "No completions, probably due to a failure" - return 1 - end - - __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result" - return 0 -end - -# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run -function __%[1]s_clear_perform_completion_once_result - __%[1]s_debug "" - __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable ==========" - set --erase __%[1]s_perform_completion_once_result - __%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result" -end - -function __%[1]s_requires_order_preservation - __%[1]s_debug "" - __%[1]s_debug "========= checking if order preservation is required ==========" - - __%[1]s_perform_completion_once - if test -z "$__%[1]s_perform_completion_once_result" 
- __%[1]s_debug "Error determining if order preservation is required" - return 1 - end - - set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) - __%[1]s_debug "Directive is: $directive" - - set -l shellCompDirectiveKeepOrder %[9]d - set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2) - __%[1]s_debug "Keeporder is: $keeporder" - - if test $keeporder -ne 0 - __%[1]s_debug "This does require order preservation" - return 0 - end - - __%[1]s_debug "This doesn't require order preservation" - return 1 -end - - -# This function does two things: -# - Obtain the completions and store them in the global __%[1]s_comp_results -# - Return false if file completion should be performed -function __%[1]s_prepare_completions - __%[1]s_debug "" - __%[1]s_debug "========= starting completion logic ==========" - - # Start fresh - set --erase __%[1]s_comp_results - - __%[1]s_perform_completion_once - __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result" - - if test -z "$__%[1]s_perform_completion_once_result" - __%[1]s_debug "No completion, probably due to a failure" - # Might as well do file completion, in case it helps - return 1 - end - - set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) - set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2] - - __%[1]s_debug "Completions are: $__%[1]s_comp_results" - __%[1]s_debug "Directive is: $directive" - - set -l shellCompDirectiveError %[4]d - set -l shellCompDirectiveNoSpace %[5]d - set -l shellCompDirectiveNoFileComp %[6]d - set -l shellCompDirectiveFilterFileExt %[7]d - set -l shellCompDirectiveFilterDirs %[8]d - - if test -z "$directive" - set directive 0 - end - - set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) - if test $compErr -eq 1 - __%[1]s_debug "Received error directive: aborting." - # Might as well do file completion, in case it helps - return 1 - end - - set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) - set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) - if test $filefilter -eq 1; or test $dirfilter -eq 1 - __%[1]s_debug "File extension filtering or directory filtering not supported" - # Do full file completion instead - return 1 - end - - set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) - set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) - - __%[1]s_debug "nospace: $nospace, nofiles: $nofiles" - - # If we want to prevent a space, or if file completion is NOT disabled, - # we need to count the number of valid completions. - # To do so, we will filter on prefix as the completions we have received - # may not already be filtered so as to allow fish to match on different - # criteria than the prefix. 
- if test $nospace -ne 0; or test $nofiles -eq 0 - set -l prefix (commandline -t | string escape --style=regex) - __%[1]s_debug "prefix: $prefix" - - set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results) - set --global __%[1]s_comp_results $completions - __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results" - - # Important not to quote the variable for count to work - set -l numComps (count $__%[1]s_comp_results) - __%[1]s_debug "numComps: $numComps" - - if test $numComps -eq 1; and test $nospace -ne 0 - # We must first split on \t to get rid of the descriptions to be - # able to check what the actual completion will be. - # We don't need descriptions anyway since there is only a single - # real completion which the shell will expand immediately. - set -l split (string split --max 1 \t $__%[1]s_comp_results[1]) - - # Fish won't add a space if the completion ends with any - # of the following characters: @=/:., - set -l lastChar (string sub -s -1 -- $split) - if not string match -r -q "[@=/:.,]" -- "$lastChar" - # In other cases, to support the "nospace" directive we trick the shell - # by outputting an extra, longer completion. - __%[1]s_debug "Adding second completion to perform nospace directive" - set --global __%[1]s_comp_results $split[1] $split[1]. - __%[1]s_debug "Completions are now: $__%[1]s_comp_results" - end - end - - if test $numComps -eq 0; and test $nofiles -eq 0 - # To be consistent with bash and zsh, we only trigger file - # completion when there are no other completions - __%[1]s_debug "Requesting file completion" - return 1 - end - end - - return 0 -end - -# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves -# so we can properly delete any completions provided by another script. -# Only do this if the program can be found, or else fish may print some errors; besides, -# the existing completions will only be loaded if the program can be found. -if type -q "%[2]s" - # The space after the program name is essential to trigger completion for the program - # and not completion of the program name itself. - # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. - complete --do-complete "%[2]s " > /dev/null 2>&1 -end - -# Remove any pre-existing completions for the program since we will be handling all of them. -complete -c %[2]s -e - -# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global -complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result' -# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results -# which provides the program's completion choices. -# If this doesn't require order preservation, we don't use the -k flag -complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' -# otherwise we use the -k flag -complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' -`, nameForVar, name, compCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) -} - -// GenFishCompletion generates fish completion file and writes to the passed writer. 
-func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genFishComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -// GenFishCompletionFile generates fish completion file. -func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenFishCompletion(outFile, includeDesc) -} diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go deleted file mode 100644 index 0671ec5f20..0000000000 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cobra - -import ( - "fmt" - "sort" - "strings" - - flag "github.com/spf13/pflag" -) - -const ( - requiredAsGroup = "cobra_annotation_required_if_others_set" - oneRequired = "cobra_annotation_one_required" - mutuallyExclusive = "cobra_annotation_mutually_exclusive" -) - -// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors -// if the command is invoked with a subset (but not all) of the given flags. -func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { - c.mergePersistentFlags() - for _, v := range flagNames { - f := c.Flags().Lookup(v) - if f == nil { - panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v)) - } - if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil { - // Only errs if the flag isn't found. - panic(err) - } - } -} - -// MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors -// if the command is invoked without at least one flag from the given set of flags. -func (c *Command) MarkFlagsOneRequired(flagNames ...string) { - c.mergePersistentFlags() - for _, v := range flagNames { - f := c.Flags().Lookup(v) - if f == nil { - panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) - } - if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil { - // Only errs if the flag isn't found. - panic(err) - } - } -} - -// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors -// if the command is invoked with more than one flag from the given set of flags. -func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { - c.mergePersistentFlags() - for _, v := range flagNames { - f := c.Flags().Lookup(v) - if f == nil { - panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v)) - } - // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed. 
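Aside: `flag_groups.go`, whose removal begins above (and continues below), implements the three annotation-driven group helpers, `MarkFlagsRequiredTogether`, `MarkFlagsOneRequired`, and `MarkFlagsMutuallyExclusive`. Each call stores the space-joined group under an annotation key on every member flag, and `ValidateFlagGroups` runs before the command's `RunE`. A minimal sketch of how they combine; the command and flag names are invented, not part of this diff:

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use:  "login",
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
	cmd.Flags().String("username", "", "registry user")
	cmd.Flags().String("password", "", "registry password")
	cmd.Flags().String("token", "", "bearer token")

	// --username and --password are only valid as a pair ...
	cmd.MarkFlagsRequiredTogether("username", "password")
	// ... basic auth and token auth are the two options, one is required ...
	cmd.MarkFlagsOneRequired("password", "token")
	// ... but they cannot be combined.
	cmd.MarkFlagsMutuallyExclusive("password", "token")

	_ = cmd.Execute() // group validation runs before RunE and reports violations
}
```

Because each `SetAnnotation` call appends a new space-joined entry, a flag can belong to several groups at once, which is exactly what the comment in the hunk below points out.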
- if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil { - panic(err) - } - } -} - -// ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the -// first error encountered. -func (c *Command) ValidateFlagGroups() error { - if c.DisableFlagParsing { - return nil - } - - flags := c.Flags() - - // groupStatus format is the list of flags as a unique ID, - // then a map of each flag name and whether it is set or not. - groupStatus := map[string]map[string]bool{} - oneRequiredGroupStatus := map[string]map[string]bool{} - mutuallyExclusiveGroupStatus := map[string]map[string]bool{} - flags.VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) - }) - - if err := validateRequiredFlagGroups(groupStatus); err != nil { - return err - } - if err := validateOneRequiredFlagGroups(oneRequiredGroupStatus); err != nil { - return err - } - if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil { - return err - } - return nil -} - -func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool { - for _, fname := range flagnames { - f := fs.Lookup(fname) - if f == nil { - return false - } - } - return true -} - -func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) { - groupInfo, found := pflag.Annotations[annotation] - if found { - for _, group := range groupInfo { - if groupStatus[group] == nil { - flagnames := strings.Split(group, " ") - - // Only consider this flag group at all if all the flags are defined. - if !hasAllFlags(flags, flagnames...) { - continue - } - - groupStatus[group] = map[string]bool{} - for _, name := range flagnames { - groupStatus[group][name] = false - } - } - - groupStatus[group][pflag.Name] = pflag.Changed - } - } -} - -func validateRequiredFlagGroups(data map[string]map[string]bool) error { - keys := sortedKeys(data) - for _, flagList := range keys { - flagnameAndStatus := data[flagList] - - unset := []string{} - for flagname, isSet := range flagnameAndStatus { - if !isSet { - unset = append(unset, flagname) - } - } - if len(unset) == len(flagnameAndStatus) || len(unset) == 0 { - continue - } - - // Sort values, so they can be tested/scripted against consistently. - sort.Strings(unset) - return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset) - } - - return nil -} - -func validateOneRequiredFlagGroups(data map[string]map[string]bool) error { - keys := sortedKeys(data) - for _, flagList := range keys { - flagnameAndStatus := data[flagList] - var set []string - for flagname, isSet := range flagnameAndStatus { - if isSet { - set = append(set, flagname) - } - } - if len(set) >= 1 { - continue - } - - // Sort values, so they can be tested/scripted against consistently. 
- sort.Strings(set) - return fmt.Errorf("at least one of the flags in the group [%v] is required", flagList) - } - return nil -} - -func validateExclusiveFlagGroups(data map[string]map[string]bool) error { - keys := sortedKeys(data) - for _, flagList := range keys { - flagnameAndStatus := data[flagList] - var set []string - for flagname, isSet := range flagnameAndStatus { - if isSet { - set = append(set, flagname) - } - } - if len(set) == 0 || len(set) == 1 { - continue - } - - // Sort values, so they can be tested/scripted against consistently. - sort.Strings(set) - return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set) - } - return nil -} - -func sortedKeys(m map[string]map[string]bool) []string { - keys := make([]string, len(m)) - i := 0 - for k := range m { - keys[i] = k - i++ - } - sort.Strings(keys) - return keys -} - -// enforceFlagGroupsForCompletion will do the following: -// - when a flag in a group is present, other flags in the group will be marked required -// - when none of the flags in a one-required group are present, all flags in the group will be marked required -// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden -// This allows the standard completion logic to behave appropriately for flag groups -func (c *Command) enforceFlagGroupsForCompletion() { - if c.DisableFlagParsing { - return - } - - flags := c.Flags() - groupStatus := map[string]map[string]bool{} - oneRequiredGroupStatus := map[string]map[string]bool{} - mutuallyExclusiveGroupStatus := map[string]map[string]bool{} - c.Flags().VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) - }) - - // If a flag that is part of a group is present, we make all the other flags - // of that group required so that the shell completion suggests them automatically - for flagList, flagnameAndStatus := range groupStatus { - for _, isSet := range flagnameAndStatus { - if isSet { - // One of the flags of the group is set, mark the other ones as required - for _, fName := range strings.Split(flagList, " ") { - _ = c.MarkFlagRequired(fName) - } - } - } - } - - // If none of the flags of a one-required group are present, we make all the flags - // of that group required so that the shell completion suggests them automatically - for flagList, flagnameAndStatus := range oneRequiredGroupStatus { - set := 0 - - for _, isSet := range flagnameAndStatus { - if isSet { - set++ - } - } - - // None of the flags of the group are set, mark all flags in the group - // as required - if set == 0 { - for _, fName := range strings.Split(flagList, " ") { - _ = c.MarkFlagRequired(fName) - } - } - } - - // If a flag that is mutually exclusive to others is present, we hide the other - // flags of that group so the shell completion does not suggest them - for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus { - for flagName, isSet := range flagnameAndStatus { - if isSet { - // One of the flags of the mutually exclusive group is set, mark the other ones as hidden - // Don't mark the flag that is already set as hidden because it may be an - // array or slice flag and therefore must continue being suggested - for _, fName := range strings.Split(flagList, " ") { - if fName != flagName { - flag 
:= c.Flags().Lookup(fName) - flag.Hidden = true - } - } - } - } - } -} diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go deleted file mode 100644 index 5519519394..0000000000 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but -// can be downloaded separately for Windows 7 or 8.1). - -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "strings" -) - -func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { - // Variables should not contain a '-' or ':' character - nameForVar := name - nameForVar = strings.Replace(nameForVar, "-", "_", -1) - nameForVar = strings.Replace(nameForVar, ":", "_", -1) - - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*- - -function __%[1]s_debug { - if ($env:BASH_COMP_DEBUG_FILE) { - "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE" - } -} - -filter __%[1]s_escapeStringWithSpecialChars { -`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` -} - -[scriptblock]${__%[2]sCompleterBlock} = { - param( - $WordToComplete, - $CommandAst, - $CursorPosition - ) - - # Get the current command line and convert into a string - $Command = $CommandAst.CommandElements - $Command = "$Command" - - __%[1]s_debug "" - __%[1]s_debug "========= starting completion logic ==========" - __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition" - - # The user could have moved the cursor backwards on the command-line. - # We need to trigger completion from the $CursorPosition location, so we need - # to truncate the command-line ($Command) up to the $CursorPosition location. - # Make sure the $Command is longer than the $CursorPosition before we truncate. - # This happens because the $Command does not include the last space. - if ($Command.Length -gt $CursorPosition) { - $Command=$Command.Substring(0,$CursorPosition) - } - __%[1]s_debug "Truncated command: $Command" - - $ShellCompDirectiveError=%[4]d - $ShellCompDirectiveNoSpace=%[5]d - $ShellCompDirectiveNoFileComp=%[6]d - $ShellCompDirectiveFilterFileExt=%[7]d - $ShellCompDirectiveFilterDirs=%[8]d - $ShellCompDirectiveKeepOrder=%[9]d - - # Prepare the command to request completions for the program. - # Split the command at the first space to separate the program and arguments. 
- $Program,$Arguments = $Command.Split(" ",2) - - $RequestComp="$Program %[3]s $Arguments" - __%[1]s_debug "RequestComp: $RequestComp" - - # we cannot use $WordToComplete because it - # has the wrong values if the cursor was moved - # so use the last argument - if ($WordToComplete -ne "" ) { - $WordToComplete = $Arguments.Split(" ")[-1] - } - __%[1]s_debug "New WordToComplete: $WordToComplete" - - - # Check for flag with equal sign - $IsEqualFlag = ($WordToComplete -Like "--*=*" ) - if ( $IsEqualFlag ) { - __%[1]s_debug "Completing equal sign flag" - # Remove the flag part - $Flag,$WordToComplete = $WordToComplete.Split("=",2) - } - - if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) { - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go method. - __%[1]s_debug "Adding extra empty parameter" - # PowerShell 7.2+ changed the way how the arguments are passed to executables, - # so for pre-7.2 or when Legacy argument passing is enabled we need to use -`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+` - if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or - ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or - (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and - $PSNativeCommandArgumentPassing -eq 'Legacy')) { -`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` - } else { - $RequestComp="$RequestComp" + ' ""' - } - } - - __%[1]s_debug "Calling $RequestComp" - # First disable ActiveHelp which is not supported for Powershell - ${env:%[10]s}=0 - - #call the command store the output in $out and redirect stderr and stdout to null - # $Out is an array contains each line per element - Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null - - # get directive from last line - [int]$Directive = $Out[-1].TrimStart(':') - if ($Directive -eq "") { - # There is no directive specified - $Directive = 0 - } - __%[1]s_debug "The completion directive is: $Directive" - - # remove directive (last element) from out - $Out = $Out | Where-Object { $_ -ne $Out[-1] } - __%[1]s_debug "The completions are: $Out" - - if (($Directive -band $ShellCompDirectiveError) -ne 0 ) { - # Error code. No completion. - __%[1]s_debug "Received error from custom completion go code" - return - } - - $Longest = 0 - [Array]$Values = $Out | ForEach-Object { - #Split the output in name and description -`+" $Name, $Description = $_.Split(\"`t\",2)"+` - __%[1]s_debug "Name: $Name Description: $Description" - - # Look for the longest completion so that we can format things nicely - if ($Longest -lt $Name.Length) { - $Longest = $Name.Length - } - - # Set the description to a one space string if there is none set. 
- # This is needed because the CompletionResult does not accept an empty string as argument - if (-Not $Description) { - $Description = " " - } - @{Name="$Name";Description="$Description"} - } - - - $Space = " " - if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) { - # remove the space here - __%[1]s_debug "ShellCompDirectiveNoSpace is called" - $Space = "" - } - - if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or - (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { - __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" - - # return here to prevent the completion of the extensions - return - } - - $Values = $Values | Where-Object { - # filter the result - $_.Name -like "$WordToComplete*" - - # Join the flag back if we have an equal sign flag - if ( $IsEqualFlag ) { - __%[1]s_debug "Join the equal sign flag back to the completion value" - $_.Name = $Flag + "=" + $_.Name - } - } - - # we sort the values in ascending order by name if keep order isn't passed - if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) { - $Values = $Values | Sort-Object -Property Name - } - - if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { - __%[1]s_debug "ShellCompDirectiveNoFileComp is called" - - if ($Values.Length -eq 0) { - # Just print an empty string here so the - # shell does not start to complete paths. - # We cannot use CompletionResult here because - # it does not accept an empty string as argument. - "" - return - } - } - - # Get the current mode - $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function - __%[1]s_debug "Mode: $Mode" - - $Values | ForEach-Object { - - # store temporary because switch will overwrite $_ - $comp = $_ - - # PowerShell supports three different completion modes - # - TabCompleteNext (default windows style - on each key press the next option is displayed) - # - Complete (works like bash) - # - MenuComplete (works like zsh) - # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function - - # CompletionResult Arguments: - # 1) CompletionText text to be used as the auto completion result - # 2) ListItemText text to be displayed in the suggestion list - # 3) ResultType type of completion result - # 4) ToolTip text for the tooltip with details about the object - - switch ($Mode) { - - # bash like - "Complete" { - - if ($Values.Length -eq 1) { - __%[1]s_debug "Only one completion left" - - # insert space after value - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") - - } else { - # Add the proper number of spaces to align the descriptions - while($comp.Name.Length -lt $Longest) { - $comp.Name = $comp.Name + " " - } - - # Check for empty description and only add parentheses if needed - if ($($comp.Description) -eq " " ) { - $Description = "" - } else { - $Description = " ($($comp.Description))" - } - - [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") - } - } - - # zsh like - "MenuComplete" { - # insert space after value - # MenuComplete will automatically show the ToolTip of - # the highlighted value at the bottom of the suggestions. 
- [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") - } - - # TabCompleteNext and in case we get something unknown - Default { - # Like MenuComplete but we don't want to add a space here because - # the user needs to press space anyway to get the completion. - # Description will not be shown because that's not possible with TabCompleteNext - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") - } - } - - } -} - -Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock ${__%[2]sCompleterBlock} -`, name, nameForVar, compCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) -} - -func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genPowerShellComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.genPowerShellCompletion(outFile, includeDesc) -} - -// GenPowerShellCompletionFile generates powershell completion file without descriptions. -func (c *Command) GenPowerShellCompletionFile(filename string) error { - return c.genPowerShellCompletionFile(filename, false) -} - -// GenPowerShellCompletion generates powershell completion file without descriptions -// and writes it to the passed writer. -func (c *Command) GenPowerShellCompletion(w io.Writer) error { - return c.genPowerShellCompletion(w, false) -} - -// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions. -func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error { - return c.genPowerShellCompletionFile(filename, true) -} - -// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions -// and writes it to the passed writer. -func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error { - return c.genPowerShellCompletion(w, true) -} diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go deleted file mode 100644 index b035742d39..0000000000 --- a/vendor/github.com/spf13/cobra/shell_completions.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cobra - -import ( - "github.com/spf13/pflag" -) - -// MarkFlagRequired instructs the various shell completion implementations to -// prioritize the named flag when performing completion, -// and causes your command to report an error if invoked without the flag. 
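Aside: the fish and PowerShell generators deleted in these hunks (and the zsh one further below) are all surfaced through small `Command` methods, conventionally wired to a `completion` subcommand. An illustrative sketch; the `ko` root command is a stand-in and nothing here is added by this change:

```go
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "ko"}

	completionCmd := &cobra.Command{
		Use:       "completion [zsh|fish|powershell]",
		Short:     "Output a completion script for the given shell",
		ValidArgs: []string{"zsh", "fish", "powershell"},
		Args:      cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
		RunE: func(cmd *cobra.Command, args []string) error {
			switch args[0] {
			case "zsh":
				return cmd.Root().GenZshCompletion(os.Stdout)
			case "fish":
				return cmd.Root().GenFishCompletion(os.Stdout, true) // true = include descriptions
			default:
				return cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
			}
		},
	}
	rootCmd.AddCommand(completionCmd)

	if err := rootCmd.Execute(); err != nil {
		log.Fatal(err)
	}
}
```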
-func (c *Command) MarkFlagRequired(name string) error { - return MarkFlagRequired(c.Flags(), name) -} - -// MarkPersistentFlagRequired instructs the various shell completion implementations to -// prioritize the named persistent flag when performing completion, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkPersistentFlagRequired(name string) error { - return MarkFlagRequired(c.PersistentFlags(), name) -} - -// MarkFlagRequired instructs the various shell completion implementations to -// prioritize the named flag when performing completion, -// and causes your command to report an error if invoked without the flag. -func MarkFlagRequired(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) -} - -// MarkFlagFilename instructs the various shell completion implementations to -// limit completions for the named flag to the specified file extensions. -func (c *Command) MarkFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.Flags(), name, extensions...) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// The bash completion script will call the bash function f for the flag. -// -// This will only work for bash completion. -// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows -// to register a Go function which will work across all shells. -func (c *Command) MarkFlagCustom(name string, f string) error { - return MarkFlagCustom(c.Flags(), name, f) -} - -// MarkPersistentFlagFilename instructs the various shell completion -// implementations to limit completions for the named persistent flag to the -// specified file extensions. -func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.PersistentFlags(), name, extensions...) -} - -// MarkFlagFilename instructs the various shell completion implementations to -// limit completions for the named flag to the specified file extensions. -func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { - return flags.SetAnnotation(name, BashCompFilenameExt, extensions) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// The bash completion script will call the bash function f for the flag. -// -// This will only work for bash completion. -// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows -// to register a Go function which will work across all shells. -func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { - return flags.SetAnnotation(name, BashCompCustom, []string{f}) -} - -// MarkFlagDirname instructs the various shell completion implementations to -// limit completions for the named flag to directory names. -func (c *Command) MarkFlagDirname(name string) error { - return MarkFlagDirname(c.Flags(), name) -} - -// MarkPersistentFlagDirname instructs the various shell completion -// implementations to limit completions for the named persistent flag to -// directory names. -func (c *Command) MarkPersistentFlagDirname(name string) error { - return MarkFlagDirname(c.PersistentFlags(), name) -} - -// MarkFlagDirname instructs the various shell completion implementations to -// limit completions for the named flag to directory names. 
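Aside: the `MarkFlag*` helpers above work purely through pflag annotations (`BashCompOneRequiredFlag`, `BashCompFilenameExt`, `BashCompSubdirsInDir`) that each shell backend interprets; as the comments note, `MarkFlagCustom` is bash-only, and `RegisterFlagCompletionFunc` is the portable route. A short sketch under invented command and flag names:

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use:  "apply",
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
	cmd.Flags().StringP("filename", "f", "", "resource file to apply")
	cmd.Flags().String("env", "", "target environment")

	// Annotation-based hints read by every shell backend.
	_ = cmd.MarkFlagRequired("filename")                // also errors at runtime if the flag is missing
	_ = cmd.MarkFlagFilename("filename", "yaml", "yml") // complete only *.yaml / *.yml paths

	// The portable alternative to MarkFlagCustom: dynamic completions in Go.
	_ = cmd.RegisterFlagCompletionFunc("env",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"dev", "staging", "prod"}, cobra.ShellCompDirectiveNoFileComp
		})

	_ = cmd.Execute()
}
```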
-func MarkFlagDirname(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{}) -} diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go deleted file mode 100644 index 1856e4c7f6..0000000000 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2013-2023 The Cobra Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// GenZshCompletionFile generates zsh completion file including descriptions. -func (c *Command) GenZshCompletionFile(filename string) error { - return c.genZshCompletionFile(filename, true) -} - -// GenZshCompletion generates zsh completion file including descriptions -// and writes it to the passed writer. -func (c *Command) GenZshCompletion(w io.Writer) error { - return c.genZshCompletion(w, true) -} - -// GenZshCompletionFileNoDesc generates zsh completion file without descriptions. -func (c *Command) GenZshCompletionFileNoDesc(filename string) error { - return c.genZshCompletionFile(filename, false) -} - -// GenZshCompletionNoDesc generates zsh completion file without descriptions -// and writes it to the passed writer. -func (c *Command) GenZshCompletionNoDesc(w io.Writer) error { - return c.genZshCompletion(w, false) -} - -// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was -// not consistent with Bash completion. It has therefore been disabled. -// Instead, when no other completion is specified, file completion is done by -// default for every argument. One can disable file completion on a per-argument -// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp. -// To achieve file extension filtering, one can use ValidArgsFunction and -// ShellCompDirectiveFilterFileExt. -// -// Deprecated -func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error { - return nil -} - -// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore -// been disabled. -// To achieve the same behavior across all shells, one can use -// ValidArgs (for the first argument only) or ValidArgsFunction for -// any argument (can include the first one also). 
-// -// Deprecated -func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error { - return nil -} - -func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.genZshCompletion(outFile, includeDesc) -} - -func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genZshComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -func genZshComp(buf io.StringWriter, name string, includeDesc bool) { - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s -compdef _%[1]s %[1]s - -# zsh completion for %-36[1]s -*- shell-script -*- - -__%[1]s_debug() -{ - local file="$BASH_COMP_DEBUG_FILE" - if [[ -n ${file} ]]; then - echo "$*" >> "${file}" - fi -} - -_%[1]s() -{ - local shellCompDirectiveError=%[3]d - local shellCompDirectiveNoSpace=%[4]d - local shellCompDirectiveNoFileComp=%[5]d - local shellCompDirectiveFilterFileExt=%[6]d - local shellCompDirectiveFilterDirs=%[7]d - local shellCompDirectiveKeepOrder=%[8]d - - local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder - local -a completions - - __%[1]s_debug "\n========= starting completion logic ==========" - __%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}" - - # The user could have moved the cursor backwards on the command-line. - # We need to trigger completion from the $CURRENT location, so we need - # to truncate the command-line ($words) up to the $CURRENT location. - # (We cannot use $CURSOR as its value does not work when a command is an alias.) - words=("${=words[1,CURRENT]}") - __%[1]s_debug "Truncated words[*]: ${words[*]}," - - lastParam=${words[-1]} - lastChar=${lastParam[-1]} - __%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}" - - # For zsh, when completing a flag with an = (e.g., %[1]s -n=) - # completions must be prefixed with the flag - setopt local_options BASH_REMATCH - if [[ "${lastParam}" =~ '-.*=' ]]; then - # We are dealing with a flag with an = - flagPrefix="-P ${BASH_REMATCH}" - fi - - # Prepare the command to obtain completions - requestComp="${words[1]} %[2]s ${words[2,-1]}" - if [ "${lastChar}" = "" ]; then - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go completion code. - __%[1]s_debug "Adding extra empty parameter" - requestComp="${requestComp} \"\"" - fi - - __%[1]s_debug "About to call: eval ${requestComp}" - - # Use eval to handle any environment variables and such - out=$(eval ${requestComp} 2>/dev/null) - __%[1]s_debug "completion output: ${out}" - - # Extract the directive integer following a : from the last line - local lastLine - while IFS='\n' read -r line; do - lastLine=${line} - done < <(printf "%%s\n" "${out[@]}") - __%[1]s_debug "last line: ${lastLine}" - - if [ "${lastLine[1]}" = : ]; then - directive=${lastLine[2,-1]} - # Remove the directive including the : and the newline - local suffix - (( suffix=${#lastLine}+2)) - out=${out[1,-$suffix]} - else - # There is no directive specified. Leave $out as is. - __%[1]s_debug "No directive found. 
Setting do default" - directive=0 - fi - - __%[1]s_debug "directive: ${directive}" - __%[1]s_debug "completions: ${out}" - __%[1]s_debug "flagPrefix: ${flagPrefix}" - - if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then - __%[1]s_debug "Completion received error. Ignoring completions." - return - fi - - local activeHelpMarker="%[9]s" - local endIndex=${#activeHelpMarker} - local startIndex=$((${#activeHelpMarker}+1)) - local hasActiveHelp=0 - while IFS='\n' read -r comp; do - # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker) - if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then - __%[1]s_debug "ActiveHelp found: $comp" - comp="${comp[$startIndex,-1]}" - if [ -n "$comp" ]; then - compadd -x "${comp}" - __%[1]s_debug "ActiveHelp will need delimiter" - hasActiveHelp=1 - fi - - continue - fi - - if [ -n "$comp" ]; then - # If requested, completions are returned with a description. - # The description is preceded by a TAB character. - # For zsh's _describe, we need to use a : instead of a TAB. - # We first need to escape any : as part of the completion itself. - comp=${comp//:/\\:} - - local tab="$(printf '\t')" - comp=${comp//$tab/:} - - __%[1]s_debug "Adding completion: ${comp}" - completions+=${comp} - lastComp=$comp - fi - done < <(printf "%%s\n" "${out[@]}") - - # Add a delimiter after the activeHelp statements, but only if: - # - there are completions following the activeHelp statements, or - # - file completion will be performed (so there will be choices after the activeHelp) - if [ $hasActiveHelp -eq 1 ]; then - if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then - __%[1]s_debug "Adding activeHelp delimiter" - compadd -x "--" - hasActiveHelp=0 - fi - fi - - if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then - __%[1]s_debug "Activating nospace." - noSpace="-S ''" - fi - - if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then - __%[1]s_debug "Activating keep order." - keepOrder="-V" - fi - - if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then - # File extension filtering - local filteringCmd - filteringCmd='_files' - for filter in ${completions[@]}; do - if [ ${filter[1]} != '*' ]; then - # zsh requires a glob pattern to do file filtering - filter="\*.$filter" - fi - filteringCmd+=" -g $filter" - done - filteringCmd+=" ${flagPrefix}" - - __%[1]s_debug "File filtering command: $filteringCmd" - _arguments '*:filename:'"$filteringCmd" - elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then - # File completion for directories only - local subdir - subdir="${completions[1]}" - if [ -n "$subdir" ]; then - __%[1]s_debug "Listing directories in $subdir" - pushd "${subdir}" >/dev/null 2>&1 - else - __%[1]s_debug "Listing directories in ." - fi - - local result - _arguments '*:dirname:_files -/'" ${flagPrefix}" - result=$? - if [ -n "$subdir" ]; then - popd >/dev/null 2>&1 - fi - return $result - else - __%[1]s_debug "Calling _describe" - if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then - __%[1]s_debug "_describe found some completions" - - # Return the success of having called _describe - return 0 - else - __%[1]s_debug "_describe did not find completions." - __%[1]s_debug "Checking if we should do file completion." 
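
To ground the script above: the candidates it parses out of `${out}` come from the program itself, via the hidden `__complete` command that cobra adds to every CLI. A minimal sketch of the Go side (the `status` command and environment names are hypothetical, not part of this vendor tree):

```go
package main

import (
	"strings"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "app"}

	// Hypothetical subcommand: when the generated zsh script runs
	// `app __complete status <prefix>`, cobra calls ValidArgsFunction and
	// prints one candidate per line followed by a ":<directive>" trailer,
	// which is exactly what the script above extracts from ${out}.
	status := &cobra.Command{
		Use: "status [env]",
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			var comps []string
			for _, env := range []string{"dev", "staging", "prod"} {
				if strings.HasPrefix(env, toComplete) {
					// "value\tdescription" pairs: the TAB is what the
					// comp=${comp//$tab/:} rewrite above converts for _describe.
					comps = append(comps, env+"\tdeployment environment")
				}
			}
			return comps, cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	root.AddCommand(status)
	_ = root.Execute()
}
```
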
- if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - __%[1]s_debug "deactivating file completion" - - # We must return an error code here to let zsh know that there were no - # completions found by _describe; this is what will trigger other - # matching algorithms to attempt to find completions. - # For example zsh can match letters in the middle of words. - return 1 - else - # Perform file completion - __%[1]s_debug "Activating file completion" - - # We must return the result of this command, so it must be the - # last command, or else we must store its result to return it. - _arguments '*:filename:_files'" ${flagPrefix}" - fi - fi - fi -} - -# don't run the completion function when being source-ed or eval-ed -if [ "$funcstack[1]" = "_%[1]s" ]; then - _%[1]s -fi -`, name, compCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, - activeHelpMarker)) -} diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore deleted file mode 100644 index c3da290134..0000000000 --- a/vendor/github.com/spf13/pflag/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.idea/* - diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml deleted file mode 100644 index 00d04cb9b0..0000000000 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -sudo: false - -language: go - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - tip - -matrix: - allow_failures: - - go: tip - -install: - - go get golang.org/x/lint/golint - - export PATH=$GOPATH/bin:$PATH - - go install ./... - -script: - - verify/all.sh -v - - go test ./... diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE deleted file mode 100644 index 63ed1cfea1..0000000000 --- a/vendor/github.com/spf13/pflag/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md deleted file mode 100644 index 7eacc5bdbe..0000000000 --- a/vendor/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,296 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag) -[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command. - -Install by running: - - go get github.com/spf13/pflag - -Run tests by running: - - go test github.com/spf13/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/spf13/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. - -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - -``` go -flag.Parse() -``` - -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. 
- -``` go -fmt.Println("ip has value ", *ip) -fmt.Println("flagvar has value ", flagvar) -``` - -There are helper functions available to get the value stored in a Flag if you have a FlagSet but find -it difficult to keep up with all of the pointers in your code. -If you have a pflag.FlagSet with a flag called 'flagname' of type int you -can use GetInt() to get the int value. But notice that 'flagname' must exist -and it must be an int. GetString("flagname") will fail. - -``` go -i, err := flagset.GetInt("flagname") -``` - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -var flagvar bool -func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") -} -flag.VarP(&flagVal, "varname", "v", "help message") -``` - -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. - -## Setting no option default values for flags - -After you create a flag it is possible to set the pflag.NoOptDefVal for -the given flag. Doing this changes the meaning of the flag slightly. If -a flag has a NoOptDefVal and the flag is set on the command line without -an option the flag will be set to the NoOptDefVal. For example given: - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -flag.Lookup("flagname").NoOptDefVal = "4321" -``` - -Would result in something like - -| Parsed Arguments | Resulting Value | -| ------------- | ------------- | -| --flagname=1357 | ip=1357 | -| --flagname | ip=4321 | -| [nothing] | ip=1234 | - -## Command line flag syntax - -``` ---flag // boolean flags, or flags with no option default values ---flag x // only on flags without a default value ---flag=x -``` - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags -or a flag with a default value - -``` -// boolean or flags where the 'no option default value' is set --f --f=true --abc -but --b true is INVALID - -// non-boolean and flags without a 'no option default value' --n 1234 --n=1234 --n1234 - -// mixed --abcs "hello" --absd="hello" --abcs1234 -``` - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -## Mutating or "Normalizing" Flag names - -It is possible to set a custom flag name 'normalization function.' 
It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. - -**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag - -``` go -func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - from := []string{"-", "_"} - to := "." - for _, sep := range from { - name = strings.Replace(name, sep, to, -1) - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) -``` - -**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name - -``` go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## Deprecating a flag or its shorthand -It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. - -**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. -```go -// deprecate a flag by specifying its name and a usage message -flags.MarkDeprecated("badflag", "please use --good-flag instead") -``` -This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. - -**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". -```go -// deprecate a flag shorthand by specifying its flag name and a usage message -flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") -``` -This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. - -Note that usage message is essential here, and it should not be empty. - -## Hidden flags -It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. - -**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. -```go -// hide a flag by specifying its name -flags.MarkHidden("secretFlag") -``` - -## Disable sorting of flags -`pflag` allows you to disable sorting of flags for help and usage message. - -**Example**: -```go -flags.BoolP("verbose", "v", false, "verbose output") -flags.String("coolflag", "yeaah", "it's really cool flag") -flags.Int("usefulflag", 777, "sometimes it's very useful") -flags.SortFlags = false -flags.PrintDefaults() -``` -**Output**: -``` - -v, --verbose verbose output - --coolflag string it's really cool flag (default "yeaah") - --usefulflag int sometimes it's very useful (default 777) -``` - - -## Supporting Go flags when using pflag -In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary -to support flags defined by third-party dependencies (e.g. `golang/glog`). 
- -**Example**: You want to add the Go flags to the `CommandLine` flagset -```go -import ( - goflag "flag" - flag "github.com/spf13/pflag" -) - -var ip *int = flag.Int("flagname", 1234, "help message for flagname") - -func main() { - flag.CommandLine.AddGoFlagSet(goflag.CommandLine) - flag.Parse() -} -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/spf13/pflag][2] after -installation. - -[2]: http://localhost:6060/pkg/github.com/spf13/pflag -[3]: http://godoc.org/github.com/spf13/pflag diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go deleted file mode 100644 index c4c5c0bfda..0000000000 --- a/vendor/github.com/spf13/pflag/bool.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import "strconv" - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Type() string { - return "bool" -} - -func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -func boolConv(sval string) (interface{}, error) { - return strconv.ParseBool(sval) -} - -// GetBool return the bool value of a flag with the given name -func (f *FlagSet) GetBool(name string) (bool, error) { - val, err := f.getFlagType(name, "bool", boolConv) - if err != nil { - return false, err - } - return val.(bool), nil -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { - f.BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, name string, value bool, usage string) { - BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (f *FlagSet) Bool(name string, value bool, usage string) *bool { - return f.BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. 
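
The bool helpers above wire `NoOptDefVal` to `"true"`, which is what lets a bool flag appear on the command line with no value at all. A quick sketch of that behavior against a standalone FlagSet (flag names hypothetical):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)

	var verbose bool
	// BoolVarP sets NoOptDefVal to "true" internally, so the flag may be
	// passed without a value.
	fs.BoolVarP(&verbose, "verbose", "v", false, "enable verbose output")

	// All three of these set verbose to true: --verbose, --verbose=true, -v.
	if err := fs.Parse([]string{"--verbose"}); err != nil {
		panic(err)
	}

	// GetBool is the type-checked accessor counterpart.
	v, err := fs.GetBool("verbose")
	fmt.Println(v, err == nil, verbose) // true true true
}
```
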
-func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { - p := new(bool) - f.BoolVarP(p, name, shorthand, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func Bool(name string, value bool, usage string) *bool { - return BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. -func BoolP(name, shorthand string, value bool, usage string) *bool { - b := CommandLine.BoolP(name, shorthand, value, usage) - return b -} diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go deleted file mode 100644 index 3731370d6a..0000000000 --- a/vendor/github.com/spf13/pflag/bool_slice.go +++ /dev/null @@ -1,185 +0,0 @@ -package pflag - -import ( - "io" - "strconv" - "strings" -) - -// -- boolSlice Value -type boolSliceValue struct { - value *[]bool - changed bool -} - -func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue { - bsv := new(boolSliceValue) - bsv.value = p - *bsv.value = val - return bsv -} - -// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag. -// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended. -func (s *boolSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - boolStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse boolean values into slice - out := make([]bool, 0, len(boolStrSlice)) - for _, boolStr := range boolStrSlice { - b, err := strconv.ParseBool(strings.TrimSpace(boolStr)) - if err != nil { - return err - } - out = append(out, b) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *boolSliceValue) Type() string { - return "boolSlice" -} - -// String defines a "native" format for this boolean slice flag value. 
-func (s *boolSliceValue) String() string { - - boolStrSlice := make([]string, len(*s.value)) - for i, b := range *s.value { - boolStrSlice[i] = strconv.FormatBool(b) - } - - out, _ := writeAsCSV(boolStrSlice) - - return "[" + out + "]" -} - -func (s *boolSliceValue) fromString(val string) (bool, error) { - return strconv.ParseBool(val) -} - -func (s *boolSliceValue) toString(val bool) string { - return strconv.FormatBool(val) -} - -func (s *boolSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *boolSliceValue) Replace(val []string) error { - out := make([]bool, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *boolSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func boolSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []bool{}, nil - } - ss := strings.Split(val, ",") - out := make([]bool, len(ss)) - for i, t := range ss { - var err error - out[i], err = strconv.ParseBool(t) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetBoolSlice returns the []bool value of a flag with the given name. -func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) { - val, err := f.getFlagType(name, "boolSlice", boolSliceConv) - if err != nil { - return []bool{}, err - } - return val.([]bool), nil -} - -// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSliceVar defines a []bool flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, "", value, usage) - return &p -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. 
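
As the `Set` method above shows, the first use of a slice flag replaces the default and every later use appends. A small sketch (flag name and values hypothetical):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	toggles := fs.BoolSliceP("toggle", "t", nil, "comma-separated booleans")

	// Set() parses CSV and appends on repeats, so the two occurrences
	// below accumulate into one slice.
	if err := fs.Parse([]string{"--toggle=true,false", "-t", "true"}); err != nil {
		panic(err)
	}
	fmt.Println(*toggles) // [true false true]
}
```
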
-func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func BoolSlice(name string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, "", value, usage) -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. -func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go deleted file mode 100644 index 67d5304570..0000000000 --- a/vendor/github.com/spf13/pflag/bytes.go +++ /dev/null @@ -1,209 +0,0 @@ -package pflag - -import ( - "encoding/base64" - "encoding/hex" - "fmt" - "strings" -) - -// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded -type bytesHexValue []byte - -// String implements pflag.Value.String. -func (bytesHex bytesHexValue) String() string { - return fmt.Sprintf("%X", []byte(bytesHex)) -} - -// Set implements pflag.Value.Set. -func (bytesHex *bytesHexValue) Set(value string) error { - bin, err := hex.DecodeString(strings.TrimSpace(value)) - - if err != nil { - return err - } - - *bytesHex = bin - - return nil -} - -// Type implements pflag.Value.Type. -func (*bytesHexValue) Type() string { - return "bytesHex" -} - -func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue { - *p = val - return (*bytesHexValue)(p) -} - -func bytesHexConv(sval string) (interface{}, error) { - - bin, err := hex.DecodeString(sval) - - if err == nil { - return bin, nil - } - - return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) -} - -// GetBytesHex return the []byte value of a flag with the given name -func (f *FlagSet) GetBytesHex(name string) ([]byte, error) { - val, err := f.getFlagType(name, "bytesHex", bytesHexConv) - - if err != nil { - return []byte{}, err - } - - return val.([]byte), nil -} - -// BytesHexVar defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) { - f.VarP(newBytesHexValue(value, p), name, "", usage) -} - -// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { - f.VarP(newBytesHexValue(value, p), name, shorthand, usage) -} - -// BytesHexVar defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func BytesHexVar(p *[]byte, name string, value []byte, usage string) { - CommandLine.VarP(newBytesHexValue(value, p), name, "", usage) -} - -// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. -func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { - CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage) -} - -// BytesHex defines an []byte flag with specified name, default value, and usage string. 
-// The return value is the address of an []byte variable that stores the value of the flag. -func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesHexVarP(p, name, "", value, usage) - return p -} - -// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesHexVarP(p, name, shorthand, value, usage) - return p -} - -// BytesHex defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func BytesHex(name string, value []byte, usage string) *[]byte { - return CommandLine.BytesHexP(name, "", value, usage) -} - -// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. -func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { - return CommandLine.BytesHexP(name, shorthand, value, usage) -} - -// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded -type bytesBase64Value []byte - -// String implements pflag.Value.String. -func (bytesBase64 bytesBase64Value) String() string { - return base64.StdEncoding.EncodeToString([]byte(bytesBase64)) -} - -// Set implements pflag.Value.Set. -func (bytesBase64 *bytesBase64Value) Set(value string) error { - bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value)) - - if err != nil { - return err - } - - *bytesBase64 = bin - - return nil -} - -// Type implements pflag.Value.Type. -func (*bytesBase64Value) Type() string { - return "bytesBase64" -} - -func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value { - *p = val - return (*bytesBase64Value)(p) -} - -func bytesBase64ValueConv(sval string) (interface{}, error) { - - bin, err := base64.StdEncoding.DecodeString(sval) - if err == nil { - return bin, nil - } - - return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) -} - -// GetBytesBase64 return the []byte value of a flag with the given name -func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) { - val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv) - - if err != nil { - return []byte{}, err - } - - return val.([]byte), nil -} - -// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) { - f.VarP(newBytesBase64Value(value, p), name, "", usage) -} - -// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { - f.VarP(newBytesBase64Value(value, p), name, shorthand, usage) -} - -// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func BytesBase64Var(p *[]byte, name string, value []byte, usage string) { - CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage) -} - -// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. 
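
Usage of the hex and base64 byte flags is symmetrical: both decode the string argument into `[]byte` during parsing. A brief sketch assuming hypothetical flag names and values:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	key := fs.BytesHexP("key", "k", nil, "key as hex")
	blob := fs.BytesBase64P("blob", "b", nil, "payload as base64")

	// Hex and base64 arguments are decoded into []byte by Parse.
	if err := fs.Parse([]string{"--key=deadbeef", "--blob=aGVsbG8="}); err != nil {
		panic(err)
	}
	fmt.Printf("%x %s\n", *key, *blob) // deadbeef hello
}
```
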
-func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { - CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage) -} - -// BytesBase64 defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesBase64VarP(p, name, "", value, usage) - return p -} - -// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesBase64VarP(p, name, shorthand, value, usage) - return p -} - -// BytesBase64 defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func BytesBase64(name string, value []byte, usage string) *[]byte { - return CommandLine.BytesBase64P(name, "", value, usage) -} - -// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash. -func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte { - return CommandLine.BytesBase64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go deleted file mode 100644 index a0b2679f71..0000000000 --- a/vendor/github.com/spf13/pflag/count.go +++ /dev/null @@ -1,96 +0,0 @@ -package pflag - -import "strconv" - -// -- count Value -type countValue int - -func newCountValue(val int, p *int) *countValue { - *p = val - return (*countValue)(p) -} - -func (i *countValue) Set(s string) error { - // "+1" means that no specific value was passed, so increment - if s == "+1" { - *i = countValue(*i + 1) - return nil - } - v, err := strconv.ParseInt(s, 0, 0) - *i = countValue(v) - return err -} - -func (i *countValue) Type() string { - return "count" -} - -func (i *countValue) String() string { return strconv.Itoa(int(*i)) } - -func countConv(sval string) (interface{}, error) { - i, err := strconv.Atoi(sval) - if err != nil { - return nil, err - } - return i, nil -} - -// GetCount return the int value of a flag with the given name -func (f *FlagSet) GetCount(name string) (int, error) { - val, err := f.getFlagType(name, "count", countConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// CountVar defines a count flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -// A count flag will add 1 to its value every time it is found on the command line -func (f *FlagSet) CountVar(p *int, name string, usage string) { - f.CountVarP(p, name, "", usage) -} - -// CountVarP is like CountVar only take a shorthand for the flag name. -func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { - flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) - flag.NoOptDefVal = "+1" -} - -// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set -func CountVar(p *int, name string, usage string) { - CommandLine.CountVar(p, name, usage) -} - -// CountVarP is like CountVar only take a shorthand for the flag name. 
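
Count flags lean on the `"+1"` sentinel `NoOptDefVal` seen above: each bare occurrence increments, while an explicit value overrides. A sketch (flag name hypothetical):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)

	var verbosity int
	fs.CountVarP(&verbosity, "verbose", "v", "increase verbosity")

	// Each occurrence applies the "+1" NoOptDefVal, so stacked shorthands
	// accumulate; an explicit --verbose=7 would set the value instead.
	if err := fs.Parse([]string{"-vvv"}); err != nil {
		panic(err)
	}
	fmt.Println(verbosity) // 3
}
```
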
-func CountVarP(p *int, name, shorthand string, usage string) { - CommandLine.CountVarP(p, name, shorthand, usage) -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value every time it is found on the command line -func (f *FlagSet) Count(name string, usage string) *int { - p := new(int) - f.CountVarP(p, name, "", usage) - return p -} - -// CountP is like Count only takes a shorthand for the flag name. -func (f *FlagSet) CountP(name, shorthand string, usage string) *int { - p := new(int) - f.CountVarP(p, name, shorthand, usage) - return p -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line -func Count(name string, usage string) *int { - return CommandLine.CountP(name, "", usage) -} - -// CountP is like Count only takes a shorthand for the flag name. -func CountP(name, shorthand string, usage string) *int { - return CommandLine.CountP(name, shorthand, usage) -} diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go deleted file mode 100644 index e9debef88e..0000000000 --- a/vendor/github.com/spf13/pflag/duration.go +++ /dev/null @@ -1,86 +0,0 @@ -package pflag - -import ( - "time" -) - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Type() string { - return "duration" -} - -func (d *durationValue) String() string { return (*time.Duration)(d).String() } - -func durationConv(sval string) (interface{}, error) { - return time.ParseDuration(sval) -} - -// GetDuration return the duration value of a flag with the given name -func (f *FlagSet) GetDuration(name string) (time.Duration, error) { - val, err := f.getFlagType(name, "duration", durationConv) - if err != nil { - return 0, err - } - return val.(time.Duration), nil -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. 
-func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, "", value, usage) - return p -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, shorthand, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func Duration(name string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, "", value, usage) -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go deleted file mode 100644 index badadda53f..0000000000 --- a/vendor/github.com/spf13/pflag/duration_slice.go +++ /dev/null @@ -1,166 +0,0 @@ -package pflag - -import ( - "fmt" - "strings" - "time" -) - -// -- durationSlice Value -type durationSliceValue struct { - value *[]time.Duration - changed bool -} - -func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue { - dsv := new(durationSliceValue) - dsv.value = p - *dsv.value = val - return dsv -} - -func (s *durationSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]time.Duration, len(ss)) - for i, d := range ss { - var err error - out[i], err = time.ParseDuration(d) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - s.changed = true - return nil -} - -func (s *durationSliceValue) Type() string { - return "durationSlice" -} - -func (s *durationSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%s", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *durationSliceValue) fromString(val string) (time.Duration, error) { - return time.ParseDuration(val) -} - -func (s *durationSliceValue) toString(val time.Duration) string { - return fmt.Sprintf("%s", val) -} - -func (s *durationSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *durationSliceValue) Replace(val []string) error { - out := make([]time.Duration, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *durationSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func durationSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []time.Duration{}, nil - } - ss := strings.Split(val, ",") - out := make([]time.Duration, len(ss)) - for i, d := range ss { - var err error - out[i], err = time.ParseDuration(d) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetDurationSlice returns the []time.Duration value of a flag with the given name -func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) { - val, err := f.getFlagType(name, "durationSlice", durationSliceConv) - if err != nil { - return []time.Duration{}, err - } - return val.([]time.Duration), nil -} - -// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string. -// The argument p points to a []time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { - f.VarP(newDurationSliceValue(value, p), name, "", usage) -} - -// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { - f.VarP(newDurationSliceValue(value, p), name, shorthand, usage) -} - -// DurationSliceVar defines a duration[] flag with specified name, default value, and usage string. -// The argument p points to a duration[] variable in which to store the value of the flag. -func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { - CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage) -} - -// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. -func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { - CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage) -} - -// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a []time.Duration variable that stores the value of the flag. 
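
A short sketch of the duration helpers above; single-value flags take anything `time.ParseDuration` accepts, and the slice form splits on commas and appends across repeats (flag names and values hypothetical):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	timeout := fs.DurationP("timeout", "t", 0, "request timeout")
	backoff := fs.DurationSliceP("backoff", "b", nil, "retry backoff steps")

	// "--backoff=100ms,1s" is CSV-parsed; the later "-b 5s" appends.
	if err := fs.Parse([]string{"--timeout=2s", "--backoff=100ms,1s", "-b", "5s"}); err != nil {
		panic(err)
	}
	fmt.Println(*timeout, *backoff) // 2s [100ms 1s 5s]
}
```
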
-func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { - p := []time.Duration{} - f.DurationSliceVarP(&p, name, "", value, usage) - return &p -} - -// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { - p := []time.Duration{} - f.DurationSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a []time.Duration variable that stores the value of the flag. -func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { - return CommandLine.DurationSliceP(name, "", value, usage) -} - -// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. -func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { - return CommandLine.DurationSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go deleted file mode 100644 index 24a5036e95..0000000000 --- a/vendor/github.com/spf13/pflag/flag.go +++ /dev/null @@ -1,1239 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the GNU extensions to the POSIX recommendations -for command-line options. See -http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -Usage: - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - - import flag "github.com/spf13/pflag" - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - var ip = flag.Int("flagname", 1234, "help message for flagname") -If you like, you can bind the flag to a variable using the Var() functions. - var flagvar int - func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") - } -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - flag.Var(&flagVal, "name", "help message for flagname") -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - flag.Parse() -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - fmt.Println("ip has value ", *ip) - fmt.Println("flagvar has value ", flagvar) - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. 
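
A compact illustration of the positional-argument accessors just described, using a hypothetical input line; note that flags may be interspersed with positionals:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	name := fs.StringP("name", "n", "", "a name")

	// Both positionals land in fs.Args() even though a flag sits between them.
	if err := fs.Parse([]string{"first", "--name=x", "second"}); err != nil {
		panic(err)
	}
	fmt.Println(*name, fs.NArg(), fs.Args(), fs.Arg(0)) // x 2 [first second] first
}
```
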
- -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - var ip = flag.IntP("flagname", "f", 1234, "help message") - var flagvar bool - func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") - } - flag.VarP(&flagval, "varname", "v", "help message") -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -Command line flag syntax: - --flag // boolean flags only - --flag=x - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags. - // boolean flags - -f - -abc - // non-boolean flags - -n 1234 - -Ifile - // mixed - -abcs "hello" - -abcn1234 - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. -*/ -package pflag - -import ( - "bytes" - "errors" - goflag "flag" - "fmt" - "io" - "os" - "sort" - "strings" -) - -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. -var ErrHelp = errors.New("pflag: help requested") - -// ErrorHandling defines how to handle flag parsing errors. -type ErrorHandling int - -const ( - // ContinueOnError will return an err from Parse() if an error is found - ContinueOnError ErrorHandling = iota - // ExitOnError will call os.Exit(2) if an error is found when parsing - ExitOnError - // PanicOnError will panic() if an error is found when parsing flags - PanicOnError -) - -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { - // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags - UnknownFlags bool -} - -// NormalizedName is a flag name that has been normalized according to rules -// for the FlagSet (e.g. making '-' and '_' equivalent). -type NormalizedName string - -// A FlagSet represents a set of defined flags. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. - // The field is a function (not a method) that may be changed to point to - // a custom error handler. - Usage func() - - // SortFlags is used to indicate, if user wants to have sorted flags in - // help/usage messages. 
- SortFlags bool - - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist - - name string - parsed bool - actual map[NormalizedName]*Flag - orderedActual []*Flag - sortedActual []*Flag - formal map[NormalizedName]*Flag - orderedFormal []*Flag - sortedFormal []*Flag - shorthands map[byte]*Flag - args []string // arguments after flags - argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- - errorHandling ErrorHandling - output io.Writer // nil means stderr; use out() accessor - interspersed bool // allow interspersed option/non-option args - normalizeNameFunc func(f *FlagSet, name string) NormalizedName - - addedGoFlagSets []*goflag.FlagSet -} - -// A Flag represents the state of a flag. -type Flag struct { - Name string // name as it appears on command line - Shorthand string // one-letter abbreviated flag - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message - Changed bool // If the user set the value (or if left to default) - NoOptDefVal string // default value (as text); if the flag is on the command line without any options - Deprecated string // If this flag is deprecated, this string is the new or now thing to use - Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text - ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use - Annotations map[string][]string // used by cobra.Command bash autocomple code -} - -// Value is the interface to the dynamic value stored in a flag. -// (The default value is represented as a string.) -type Value interface { - String() string - Set(string) error - Type() string -} - -// SliceValue is a secondary interface to all flags which hold a list -// of values. This allows full control over the value of list flags, -// and avoids complicated marshalling and unmarshalling to csv. -type SliceValue interface { - // Append adds the specified value to the end of the flag value list. - Append(string) error - // Replace will fully overwrite any data currently in the flag value list. - Replace([]string) error - // GetSlice returns the flag value list as an array of strings. - GetSlice() []string -} - -// sortFlags returns the flags as a slice in lexicographical sorted order. -func sortFlags(flags map[NormalizedName]*Flag) []*Flag { - list := make(sort.StringSlice, len(flags)) - i := 0 - for k := range flags { - list[i] = string(k) - i++ - } - list.Sort() - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[NormalizedName(name)] - } - return result -} - -// SetNormalizeFunc allows you to add a function which can translate flag names. -// Flags added to the FlagSet will be translated and then when anything tries to -// look up the flag that will also be translated. So it would be possible to create -// a flag named "getURL" and have it translated to "geturl". A user could then pass -// "--getUrl" which may also be translated to "geturl" and everything will work. 
-func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { - f.normalizeNameFunc = n - f.sortedFormal = f.sortedFormal[:0] - for fname, flag := range f.formal { - nname := f.normalizeFlagName(flag.Name) - if fname == nname { - continue - } - flag.Name = string(nname) - delete(f.formal, fname) - f.formal[nname] = flag - if _, set := f.actual[fname]; set { - delete(f.actual, fname) - f.actual[nname] = flag - } - } -} - -// GetNormalizeFunc returns the previously set NormalizeFunc of a function which -// does no translation, if not set previously. -func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { - if f.normalizeNameFunc != nil { - return f.normalizeNameFunc - } - return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } -} - -func (f *FlagSet) normalizeFlagName(name string) NormalizedName { - n := f.GetNormalizeFunc() - return n(f, name) -} - -func (f *FlagSet) out() io.Writer { - if f.output == nil { - return os.Stderr - } - return f.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (f *FlagSet) SetOutput(output io.Writer) { - f.output = output -} - -// VisitAll visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func (f *FlagSet) VisitAll(fn func(*Flag)) { - if len(f.formal) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.formal) != len(f.sortedFormal) { - f.sortedFormal = sortFlags(f.formal) - } - flags = f.sortedFormal - } else { - flags = f.orderedFormal - } - - for _, flag := range flags { - fn(flag) - } -} - -// HasFlags returns a bool to indicate if the FlagSet has any flags defined. -func (f *FlagSet) HasFlags() bool { - return len(f.formal) > 0 -} - -// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags -// that are not hidden. -func (f *FlagSet) HasAvailableFlags() bool { - for _, flag := range f.formal { - if !flag.Hidden { - return true - } - } - return false -} - -// VisitAll visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. -func (f *FlagSet) Visit(fn func(*Flag)) { - if len(f.actual) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.actual) != len(f.sortedActual) { - f.sortedActual = sortFlags(f.actual) - } - flags = f.sortedActual - } else { - flags = f.orderedActual - } - - for _, flag := range flags { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) Lookup(name string) *Flag { - return f.lookup(f.normalizeFlagName(name)) -} - -// ShorthandLookup returns the Flag structure of the short handed flag, -// returning nil if none exists. -// It panics, if len(name) > 1. 
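
The difference between the `Visit` and `VisitAll` traversals above is easiest to see side by side; a minimal sketch (flag names hypothetical, sorted order since `SortFlags` defaults to true):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.String("alpha", "", "first")
	fs.String("beta", "", "second")
	_ = fs.Parse([]string{"--beta=x"})

	// VisitAll walks every defined flag; Visit only those actually set.
	fs.VisitAll(func(f *flag.Flag) { fmt.Print(f.Name, " ") }) // alpha beta
	fmt.Println()
	fs.Visit(func(f *flag.Flag) { fmt.Println(f.Name, "=", f.Value.String()) }) // beta = x
}
```
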
-func (f *FlagSet) ShorthandLookup(name string) *Flag {
-	if name == "" {
-		return nil
-	}
-	if len(name) > 1 {
-		msg := fmt.Sprintf("cannot look up shorthand which is more than one ASCII character: %q", name)
-		fmt.Fprintln(f.out(), msg)
-		panic(msg)
-	}
-	c := name[0]
-	return f.shorthands[c]
-}
-
-// lookup returns the Flag structure of the named flag, returning nil if none exists.
-func (f *FlagSet) lookup(name NormalizedName) *Flag {
-	return f.formal[name]
-}
-
-// getFlagType returns the value of the named flag as the given type,
-// converting the flag's string value via convFunc.
-func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
-	flag := f.Lookup(name)
-	if flag == nil {
-		err := fmt.Errorf("flag accessed but not defined: %s", name)
-		return nil, err
-	}
-
-	if flag.Value.Type() != ftype {
-		err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
-		return nil, err
-	}
-
-	sval := flag.Value.String()
-	result, err := convFunc(sval)
-	if err != nil {
-		return nil, err
-	}
-	return result, nil
-}
-
-// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
-// found during arg parsing. This allows your program to know which args were
-// before the -- and which came after.
-func (f *FlagSet) ArgsLenAtDash() int {
-	return f.argsLenAtDash
-}
-
-// MarkDeprecated indicates that a flag is deprecated in your program. It will
-// continue to function but will not show up in help or usage messages. Using
-// this flag will also print the given usageMessage.
-func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
-	flag := f.Lookup(name)
-	if flag == nil {
-		return fmt.Errorf("flag %q does not exist", name)
-	}
-	if usageMessage == "" {
-		return fmt.Errorf("deprecated message for flag %q must be set", name)
-	}
-	flag.Deprecated = usageMessage
-	flag.Hidden = true
-	return nil
-}
-
-// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
-// program. It will continue to function but will not show up in help or usage
-// messages. Using this flag will also print the given usageMessage.
-func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
-	flag := f.Lookup(name)
-	if flag == nil {
-		return fmt.Errorf("flag %q does not exist", name)
-	}
-	if usageMessage == "" {
-		return fmt.Errorf("deprecated message for flag %q must be set", name)
-	}
-	flag.ShorthandDeprecated = usageMessage
-	return nil
-}
-
-// MarkHidden sets a flag to 'hidden' in your program. It will continue to
-// function but will not show up in help or usage messages.
-func (f *FlagSet) MarkHidden(name string) error {
-	flag := f.Lookup(name)
-	if flag == nil {
-		return fmt.Errorf("flag %q does not exist", name)
-	}
-	flag.Hidden = true
-	return nil
-}
-
-// Lookup returns the Flag structure of the named command-line flag,
-// returning nil if none exists.
-func Lookup(name string) *Flag {
-	return CommandLine.Lookup(name)
-}
-
-// ShorthandLookup returns the Flag structure of the shorthand flag,
-// returning nil if none exists.
-func ShorthandLookup(name string) *Flag {
-	return CommandLine.ShorthandLookup(name)
-}
-
-// Set sets the value of the named flag.
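
The Mark* helpers above drive the deprecation warnings printed during parsing. A hedged sketch with hypothetical flag names (the body of Set follows below):

```go
package main

import flag "github.com/spf13/pflag"

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.String("old-name", "", "legacy option")
	fs.String("new-name", "", "replacement option")

	// Hides --old-name from help output and warns when it is used.
	if err := fs.MarkDeprecated("old-name", "use --new-name instead"); err != nil {
		panic(err) // fails only if the flag is missing or the message is empty
	}

	_ = fs.Parse([]string{"--old-name=x"})
	// stderr: Flag --old-name has been deprecated, use --new-name instead
}
```
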
-func (f *FlagSet) Set(name, value string) error {
-	normalName := f.normalizeFlagName(name)
-	flag, ok := f.formal[normalName]
-	if !ok {
-		return fmt.Errorf("no such flag -%v", name)
-	}
-
-	err := flag.Value.Set(value)
-	if err != nil {
-		var flagName string
-		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
-			flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
-		} else {
-			flagName = fmt.Sprintf("--%s", flag.Name)
-		}
-		return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
-	}
-
-	if !flag.Changed {
-		if f.actual == nil {
-			f.actual = make(map[NormalizedName]*Flag)
-		}
-		f.actual[normalName] = flag
-		f.orderedActual = append(f.orderedActual, flag)
-
-		flag.Changed = true
-	}
-
-	if flag.Deprecated != "" {
-		fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
-	}
-	return nil
-}
-
-// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
-// This is sometimes used by spf13/cobra programs which want to generate additional
-// bash completion information.
-func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
-	normalName := f.normalizeFlagName(name)
-	flag, ok := f.formal[normalName]
-	if !ok {
-		return fmt.Errorf("no such flag -%v", name)
-	}
-	if flag.Annotations == nil {
-		flag.Annotations = map[string][]string{}
-	}
-	flag.Annotations[key] = values
-	return nil
-}
-
-// Changed returns true if the flag was explicitly set during Parse() and false
-// otherwise.
-func (f *FlagSet) Changed(name string) bool {
-	flag := f.Lookup(name)
-	// If a flag doesn't exist, it wasn't changed.
-	if flag == nil {
-		return false
-	}
-	return flag.Changed
-}
-
-// Set sets the value of the named command-line flag.
-func Set(name, value string) error {
-	return CommandLine.Set(name, value)
-}
-
-// PrintDefaults prints, to standard error unless configured
-// otherwise, the default values of all defined flags in the set.
-func (f *FlagSet) PrintDefaults() {
-	usages := f.FlagUsages()
-	fmt.Fprint(f.out(), usages)
-}
-
-// defaultIsZeroValue returns true if the default value for this flag represents
-// a zero value.
-func (f *Flag) defaultIsZeroValue() bool {
-	switch f.Value.(type) {
-	case boolFlag:
-		return f.DefValue == "false"
-	case *durationValue:
-		// Beginning in Go 1.7, duration zero values are "0s"
-		return f.DefValue == "0" || f.DefValue == "0s"
-	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
-		return f.DefValue == "0"
-	case *stringValue:
-		return f.DefValue == ""
-	case *ipValue, *ipMaskValue, *ipNetValue:
-		return f.DefValue == ""
-	case *intSliceValue, *stringSliceValue, *stringArrayValue:
-		return f.DefValue == "[]"
-	default:
-		switch f.Value.String() {
-		case "false":
-			return true
-		case "<nil>":
-			return true
-		case "":
-			return true
-		case "0":
-			return true
-		}
-		return false
-	}
-}
-
-// UnquoteUsage extracts a back-quoted name from the usage
-// string for a flag and returns it and the un-quoted usage.
-// Given "a `name` to show" it returns ("name", "a name to show").
-// If there are no back quotes, the name is an educated guess of the
-// type of the flag's value, or the empty string if the flag is boolean.
-func UnquoteUsage(flag *Flag) (name string, usage string) {
-	// Look for a back-quoted name, but avoid the strings package.
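
Set, Changed, and SetAnnotation above combine as in this sketch, with hypothetical names (the body of UnquoteUsage, documented just above, continues below):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.String("region", "us-east-1", "target region")

	// A programmatic Set marks the flag Changed, exactly like a command-line hit.
	_ = fs.Set("region", "eu-west-1")
	fmt.Println(fs.Changed("region")) // true

	// Annotations attach free-form metadata; cobra reads these for completions.
	_ = fs.SetAnnotation("region", "group", []string{"placement"})
	fmt.Println(fs.Lookup("region").Annotations["group"]) // [placement]
}
```
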
- usage = flag.Usage - for i := 0; i < len(usage); i++ { - if usage[i] == '`' { - for j := i + 1; j < len(usage); j++ { - if usage[j] == '`' { - name = usage[i+1 : j] - usage = usage[:i] + name + usage[j+1:] - return name, usage - } - } - break // Only one back quote; use type name. - } - } - - name = flag.Value.Type() - switch name { - case "bool": - name = "" - case "float64": - name = "float" - case "int64": - name = "int" - case "uint64": - name = "uint" - case "stringSlice": - name = "strings" - case "intSlice": - name = "ints" - case "uintSlice": - name = "uints" - case "boolSlice": - name = "bools" - } - - return -} - -// Splits the string `s` on whitespace into an initial substring up to -// `i` runes in length and the remainder. Will go `slop` over `i` if -// that encompasses the entire string (which allows the caller to -// avoid short orphan words on the final line). -func wrapN(i, slop int, s string) (string, string) { - if i+slop > len(s) { - return s, "" - } - - w := strings.LastIndexAny(s[:i], " \t\n") - if w <= 0 { - return s, "" - } - nlPos := strings.LastIndex(s[:i], "\n") - if nlPos > 0 && nlPos < w { - return s[:nlPos], s[nlPos+1:] - } - return s[:w], s[w+1:] -} - -// Wraps the string `s` to a maximum width `w` with leading indent -// `i`. The first line is not indented (this is assumed to be done by -// caller). Pass `w` == 0 to do no wrapping -func wrap(i, w int, s string) string { - if w == 0 { - return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1) - } - - // space between indent i and end of line width w into which - // we should wrap the text. - wrap := w - i - - var r, l string - - // Not enough space for sensible wrapping. Wrap as a block on - // the next line instead. - if wrap < 24 { - i = 16 - wrap = w - i - r += "\n" + strings.Repeat(" ", i) - } - // If still not enough space then don't even try to wrap. - if wrap < 24 { - return strings.Replace(s, "\n", r, -1) - } - - // Try to avoid short orphan words on the final line, by - // allowing wrapN to go a bit over if that would fit in the - // remainder of the line. - slop := 5 - wrap = wrap - slop - - // Handle first line, which is indented by the caller (or the - // special case above) - l, s = wrapN(wrap, slop, s) - r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1) - - // Now wrap the rest - for s != "" { - var t string - - t, s = wrapN(wrap, slop, s) - r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1) - } - - return r - -} - -// FlagUsagesWrapped returns a string containing the usage information -// for all flags in the FlagSet. 
Wrapped to `cols` columns (0 for no -// wrapping) -func (f *FlagSet) FlagUsagesWrapped(cols int) string { - buf := new(bytes.Buffer) - - lines := make([]string, 0, len(f.formal)) - - maxlen := 0 - f.VisitAll(func(flag *Flag) { - if flag.Hidden { - return - } - - line := "" - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) - } else { - line = fmt.Sprintf(" --%s", flag.Name) - } - - varname, usage := UnquoteUsage(flag) - if varname != "" { - line += " " + varname - } - if flag.NoOptDefVal != "" { - switch flag.Value.Type() { - case "string": - line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": - if flag.NoOptDefVal != "true" { - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - case "count": - if flag.NoOptDefVal != "+1" { - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - default: - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - } - - // This special character will be replaced with spacing once the - // correct alignment is calculated - line += "\x00" - if len(line) > maxlen { - maxlen = len(line) - } - - line += usage - if !flag.defaultIsZeroValue() { - if flag.Value.Type() == "string" { - line += fmt.Sprintf(" (default %q)", flag.DefValue) - } else { - line += fmt.Sprintf(" (default %s)", flag.DefValue) - } - } - if len(flag.Deprecated) != 0 { - line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated) - } - - lines = append(lines, line) - }) - - for _, line := range lines { - sidx := strings.Index(line, "\x00") - spacing := strings.Repeat(" ", maxlen-sidx) - // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx - fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) - } - - return buf.String() -} - -// FlagUsages returns a string containing the usage information for all flags in -// the FlagSet -func (f *FlagSet) FlagUsages() string { - return f.FlagUsagesWrapped(0) -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) - f.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -// By default it prints a simple header and calls PrintDefaults; for details about the -// format of the output and how to control it, see the documentation for PrintDefaults. -var Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// NFlag returns the number of flags that have been set. -func (f *FlagSet) NFlag() int { return len(f.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (f *FlagSet) Arg(i int) string { - if i < 0 || i >= len(f.args) { - return "" - } - return f.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. 
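
FlagUsagesWrapped above is the renderer behind cobra's flag help; a sketch with hypothetical flags, including a back-quoted placeholder that UnquoteUsage extracts (the package-level Arg follows below):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.BoolP("verbose", "v", false, "emit detailed progress information, including every registry round-trip")
	fs.String("platform", "", "target `platform`, e.g. linux/amd64")

	// Wrap usage text at 48 columns; passing 0 disables wrapping.
	fmt.Print(fs.FlagUsagesWrapped(48))
}
```
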
-func Arg(i int) string {
-	return CommandLine.Arg(i)
-}
-
-// NArg is the number of arguments remaining after flags have been processed.
-func (f *FlagSet) NArg() int { return len(f.args) }
-
-// NArg is the number of arguments remaining after flags have been processed.
-func NArg() int { return len(CommandLine.args) }
-
-// Args returns the non-flag arguments.
-func (f *FlagSet) Args() []string { return f.args }
-
-// Args returns the non-flag command-line arguments.
-func Args() []string { return CommandLine.args }
-
-// Var defines a flag with the specified name and usage string. The type and
-// value of the flag are represented by the first argument, of type Value, which
-// typically holds a user-defined implementation of Value. For instance, the
-// caller could create a flag that turns a comma-separated string into a slice
-// of strings by giving the slice the methods of Value; in particular, Set would
-// decompose the comma-separated string into the slice.
-func (f *FlagSet) Var(value Value, name string, usage string) {
-	f.VarP(value, name, "", usage)
-}
-
-// VarPF is like VarP, but returns the flag created.
-func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
-	// Remember the default value as a string; it won't change.
-	flag := &Flag{
-		Name:      name,
-		Shorthand: shorthand,
-		Usage:     usage,
-		Value:     value,
-		DefValue:  value.String(),
-	}
-	f.AddFlag(flag)
-	return flag
-}
-
-// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
-	f.VarPF(value, name, shorthand, usage)
-}
-
-// AddFlag will add the flag to the FlagSet.
-func (f *FlagSet) AddFlag(flag *Flag) {
-	normalizedFlagName := f.normalizeFlagName(flag.Name)
-
-	_, alreadyThere := f.formal[normalizedFlagName]
-	if alreadyThere {
-		msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
-		fmt.Fprintln(f.out(), msg)
-		panic(msg) // Happens only if flags are declared with identical names
-	}
-	if f.formal == nil {
-		f.formal = make(map[NormalizedName]*Flag)
-	}
-
-	flag.Name = string(normalizedFlagName)
-	f.formal[normalizedFlagName] = flag
-	f.orderedFormal = append(f.orderedFormal, flag)
-
-	if flag.Shorthand == "" {
-		return
-	}
-	if len(flag.Shorthand) > 1 {
-		msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
-		fmt.Fprintln(f.out(), msg)
-		panic(msg)
-	}
-	if f.shorthands == nil {
-		f.shorthands = make(map[byte]*Flag)
-	}
-	c := flag.Shorthand[0]
-	used, alreadyThere := f.shorthands[c]
-	if alreadyThere {
-		msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
-		fmt.Fprintln(f.out(), msg)
-		panic(msg)
-	}
-	f.shorthands[c] = flag
-}
-
-// AddFlagSet adds one FlagSet to another. If a flag is already present in f
-// the flag from newSet will be ignored.
-func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
-	if newSet == nil {
-		return
-	}
-	newSet.VisitAll(func(flag *Flag) {
-		if f.Lookup(flag.Name) == nil {
-			f.AddFlag(flag)
-		}
-	})
-}
-
-// Var defines a flag with the specified name and usage string. The type and
-// value of the flag are represented by the first argument, of type Value, which
-// typically holds a user-defined implementation of Value. For instance, the
-// caller could create a flag that turns a comma-separated string into a slice
-// of strings by giving the slice the methods of Value; in particular, Set would
-// decompose the comma-separated string into the slice.
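
Lookup gives access to the registered *Flag, which is the usual way to set NoOptDefVal (described in the Flag struct earlier): the value a flag takes when it appears with no argument. A sketch with a hypothetical --color flag (the package-level Var follows below):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.String("color", "never", "when to colorize output")

	// With NoOptDefVal set, a bare --color means --color=always.
	// Note the tradeoff: "--color always" (space form) no longer consumes "always".
	fs.Lookup("color").NoOptDefVal = "always"

	_ = fs.Parse([]string{"--color"})
	fmt.Println(fs.Lookup("color").Value.String()) // always
}
```
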
-func Var(value Value, name string, usage string) { - CommandLine.VarP(value, name, "", usage) -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. -func VarP(value Value, name, shorthand, usage string) { - CommandLine.VarP(value, name, shorthand, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.out(), err) - f.usage() - } - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (f *FlagSet) usage() { - if f == CommandLine { - Usage() - } else if f.Usage == nil { - defaultUsage(f) - } else { - f.Usage() - } -} - -//--unknown (args will be empty) -//--unknown --next-flag ... (args will be --next-flag ...) -//--unknown arg ... (args will be arg ...) -func stripUnknownFlagValue(args []string) []string { - if len(args) == 0 { - //--unknown - return args - } - - first := args[0] - if len(first) > 0 && first[0] == '-' { - //--unknown --next-flag ... - return args - } - - //--unknown arg ... (args will be arg ...) - if len(args) > 1 { - return args[1:] - } - return nil -} - -func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - name := s[2:] - if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) - return - } - - split := strings.SplitN(name, "=", 2) - name = split[0] - flag, exists := f.formal[f.normalizeFlagName(name)] - - if !exists { - switch { - case name == "help": - f.usage() - return a, ErrHelp - case f.ParseErrorsWhitelist.UnknownFlags: - // --unknown=unknownval arg ... - // we do not want to lose arg in this case - if len(split) >= 2 { - return a, nil - } - - return stripUnknownFlagValue(a), nil - default: - err = f.failf("unknown flag: --%s", name) - return - } - } - - var value string - if len(split) == 2 { - // '--flag=arg' - value = split[1] - } else if flag.NoOptDefVal != "" { - // '--flag' (arg was optional) - value = flag.NoOptDefVal - } else if len(a) > 0 { - // '--flag arg' - value = a[0] - a = a[1:] - } else { - // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) - return - } - - err = fn(flag, value) - if err != nil { - f.failf(err.Error()) - } - return -} - -func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { - outArgs = args - - if strings.HasPrefix(shorthands, "test.") { - return - } - - outShorts = shorthands[1:] - c := shorthands[0] - - flag, exists := f.shorthands[c] - if !exists { - switch { - case c == 'h': - f.usage() - err = ErrHelp - return - case f.ParseErrorsWhitelist.UnknownFlags: - // '-f=arg arg ...' 
-			// we do not want to lose arg in this case
-			if len(shorthands) > 2 && shorthands[1] == '=' {
-				outShorts = ""
-				return
-			}
-
-			outArgs = stripUnknownFlagValue(outArgs)
-			return
-		default:
-			err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
-			return
-		}
-	}
-
-	var value string
-	if len(shorthands) > 2 && shorthands[1] == '=' {
-		// '-f=arg'
-		value = shorthands[2:]
-		outShorts = ""
-	} else if flag.NoOptDefVal != "" {
-		// '-f' (arg was optional)
-		value = flag.NoOptDefVal
-	} else if len(shorthands) > 1 {
-		// '-farg'
-		value = shorthands[1:]
-		outShorts = ""
-	} else if len(args) > 0 {
-		// '-f arg'
-		value = args[0]
-		outArgs = args[1:]
-	} else {
-		// '-f' (arg was required)
-		err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
-		return
-	}
-
-	if flag.ShorthandDeprecated != "" {
-		fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
-	}
-
-	err = fn(flag, value)
-	if err != nil {
-		f.failf(err.Error())
-	}
-	return
-}
-
-func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
-	a = args
-	shorthands := s[1:]
-
-	// "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
-	for len(shorthands) > 0 {
-		shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
-		if err != nil {
-			return
-		}
-	}
-
-	return
-}
-
-func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
-	for len(args) > 0 {
-		s := args[0]
-		args = args[1:]
-		if len(s) == 0 || s[0] != '-' || len(s) == 1 {
-			if !f.interspersed {
-				f.args = append(f.args, s)
-				f.args = append(f.args, args...)
-				return nil
-			}
-			f.args = append(f.args, s)
-			continue
-		}
-
-		if s[1] == '-' {
-			if len(s) == 2 { // "--" terminates the flags
-				f.argsLenAtDash = len(f.args)
-				f.args = append(f.args, args...)
-				break
-			}
-			args, err = f.parseLongArg(s, args, fn)
-		} else {
-			args, err = f.parseShortArg(s, args, fn)
-		}
-		if err != nil {
-			return
-		}
-	}
-	return
-}
-
-// Parse parses flag definitions from the argument list, which should not
-// include the command name. Must be called after all flags in the FlagSet
-// are defined and before flags are accessed by the program.
-// The return value will be ErrHelp if -help was set but not defined.
-func (f *FlagSet) Parse(arguments []string) error {
-	if f.addedGoFlagSets != nil {
-		for _, goFlagSet := range f.addedGoFlagSets {
-			goFlagSet.Parse(nil)
-		}
-	}
-	f.parsed = true
-
-	f.args = make([]string, 0, len(arguments))
-
-	set := func(flag *Flag, value string) error {
-		return f.Set(flag.Name, value)
-	}
-
-	err := f.parseArgs(arguments, set)
-	if err != nil {
-		switch f.errorHandling {
-		case ContinueOnError:
-			return err
-		case ExitOnError:
-			fmt.Println(err)
-			os.Exit(2)
-		case PanicOnError:
-			panic(err)
-		}
-	}
-	return nil
-}
-
-type parseFunc func(flag *Flag, value string) error
-
-// ParseAll parses flag definitions from the argument list, which should not
-// include the command name. The arguments for fn are flag and value. Must be
-// called after all flags in the FlagSet are defined and before flags are
-// accessed by the program. The return value will be ErrHelp if -help was set
-// but not defined.
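
ParseErrorsWhitelist.UnknownFlags, consulted in parseLongArg and parseSingleShortArg above, downgrades unknown flags from hard errors to skipped tokens. A sketch with hypothetical flags (the body of ParseAll follows below):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.ParseErrorsWhitelist.UnknownFlags = true // tolerate flags this set does not define
	fs.String("known", "", "a defined flag")

	err := fs.Parse([]string{"--known=x", "--mystery", "value", "positional"})
	fmt.Println(err)       // <nil>: --mystery and its value were skipped, not rejected
	fmt.Println(fs.Args()) // [positional]
}
```
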
-func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
-	f.parsed = true
-	f.args = make([]string, 0, len(arguments))
-
-	err := f.parseArgs(arguments, fn)
-	if err != nil {
-		switch f.errorHandling {
-		case ContinueOnError:
-			return err
-		case ExitOnError:
-			os.Exit(2)
-		case PanicOnError:
-			panic(err)
-		}
-	}
-	return nil
-}
-
-// Parsed reports whether f.Parse has been called.
-func (f *FlagSet) Parsed() bool {
-	return f.parsed
-}
-
-// Parse parses the command-line flags from os.Args[1:]. Must be called
-// after all flags are defined and before flags are accessed by the program.
-func Parse() {
-	// Ignore errors; CommandLine is set for ExitOnError.
-	CommandLine.Parse(os.Args[1:])
-}
-
-// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
-// The arguments for fn are flag and value. Must be called after all flags are
-// defined and before flags are accessed by the program.
-func ParseAll(fn func(flag *Flag, value string) error) {
-	// Ignore errors; CommandLine is set for ExitOnError.
-	CommandLine.ParseAll(os.Args[1:], fn)
-}
-
-// SetInterspersed sets whether to support interspersed option/non-option arguments.
-func SetInterspersed(interspersed bool) {
-	CommandLine.SetInterspersed(interspersed)
-}
-
-// Parsed returns true if the command-line flags have been parsed.
-func Parsed() bool {
-	return CommandLine.Parsed()
-}
-
-// CommandLine is the default set of command-line flags, parsed from os.Args.
-var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
-
-// NewFlagSet returns a new, empty flag set with the specified name,
-// error handling property and SortFlags set to true.
-func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
-	f := &FlagSet{
-		name:          name,
-		errorHandling: errorHandling,
-		argsLenAtDash: -1,
-		interspersed:  true,
-		SortFlags:     true,
-	}
-	return f
-}
-
-// SetInterspersed sets whether to support interspersed option/non-option arguments.
-func (f *FlagSet) SetInterspersed(interspersed bool) {
-	f.interspersed = interspersed
-}
-
-// Init sets the name and error handling property for a flag set.
-// By default, the zero FlagSet uses an empty name and the
-// ContinueOnError error handling policy.
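
ParseAll above hands each flag/value pair to the callback instead of storing it, so a callback that wants the value kept must call Set itself. A sketch (the body of Init, documented just above, follows below):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.String("level", "info", "log level")

	// Observe every parsed pair, then store it via Set.
	err := fs.ParseAll([]string{"--level=debug"}, func(fl *flag.Flag, value string) error {
		fmt.Printf("observed --%s=%s\n", fl.Name, value)
		return fs.Set(fl.Name, value)
	})
	fmt.Println(err, fs.Lookup("level").Value.String()) // <nil> debug
}
```
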
-func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { - f.name = name - f.errorHandling = errorHandling - f.argsLenAtDash = -1 -} diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go deleted file mode 100644 index a243f81f7f..0000000000 --- a/vendor/github.com/spf13/pflag/float32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- float32 Value -type float32Value float32 - -func newFloat32Value(val float32, p *float32) *float32Value { - *p = val - return (*float32Value)(p) -} - -func (f *float32Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 32) - *f = float32Value(v) - return err -} - -func (f *float32Value) Type() string { - return "float32" -} - -func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } - -func float32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseFloat(sval, 32) - if err != nil { - return 0, err - } - return float32(v), nil -} - -// GetFloat32 return the float32 value of a flag with the given name -func (f *FlagSet) GetFloat32(name string) (float32, error) { - val, err := f.getFlagType(name, "float32", float32Conv) - if err != nil { - return 0, err - } - return val.(float32), nil -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func Float32Var(p *float32, name string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, "", value, usage) - return p -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, shorthand, value, usage) - return p -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func Float32(name string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, "", value, usage) -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. 
-func Float32P(name, shorthand string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go deleted file mode 100644 index caa352741a..0000000000 --- a/vendor/github.com/spf13/pflag/float32_slice.go +++ /dev/null @@ -1,174 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- float32Slice Value -type float32SliceValue struct { - value *[]float32 - changed bool -} - -func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue { - isv := new(float32SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *float32SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]float32, len(ss)) - for i, d := range ss { - var err error - var temp64 float64 - temp64, err = strconv.ParseFloat(d, 32) - if err != nil { - return err - } - out[i] = float32(temp64) - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *float32SliceValue) Type() string { - return "float32Slice" -} - -func (s *float32SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%f", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *float32SliceValue) fromString(val string) (float32, error) { - t64, err := strconv.ParseFloat(val, 32) - if err != nil { - return 0, err - } - return float32(t64), nil -} - -func (s *float32SliceValue) toString(val float32) string { - return fmt.Sprintf("%f", val) -} - -func (s *float32SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *float32SliceValue) Replace(val []string) error { - out := make([]float32, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *float32SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func float32SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []float32{}, nil - } - ss := strings.Split(val, ",") - out := make([]float32, len(ss)) - for i, d := range ss { - var err error - var temp64 float64 - temp64, err = strconv.ParseFloat(d, 32) - if err != nil { - return nil, err - } - out[i] = float32(temp64) - - } - return out, nil -} - -// GetFloat32Slice return the []float32 value of a flag with the given name -func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) { - val, err := f.getFlagType(name, "float32Slice", float32SliceConv) - if err != nil { - return []float32{}, err - } - return val.([]float32), nil -} - -// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string. -// The argument p points to a []float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) { - f.VarP(newFloat32SliceValue(value, p), name, "", usage) -} - -// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { - f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) -} - -// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string. -// The argument p points to a float32[] variable in which to store the value of the flag. -func Float32SliceVar(p *[]float32, name string, value []float32, usage string) { - CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage) -} - -// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { - CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) -} - -// Float32Slice defines a []float32 flag with specified name, default value, and usage string. -// The return value is the address of a []float32 variable that stores the value of the flag. -func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 { - p := []float32{} - f.Float32SliceVarP(&p, name, "", value, usage) - return &p -} - -// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { - p := []float32{} - f.Float32SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Float32Slice defines a []float32 flag with specified name, default value, and usage string. -// The return value is the address of a []float32 variable that stores the value of the flag. -func Float32Slice(name string, value []float32, usage string) *[]float32 { - return CommandLine.Float32SliceP(name, "", value, usage) -} - -// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. -func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { - return CommandLine.Float32SliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go deleted file mode 100644 index 04b5492a7d..0000000000 --- a/vendor/github.com/spf13/pflag/float64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Type() string { - return "float64" -} - -func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } - -func float64Conv(sval string) (interface{}, error) { - return strconv.ParseFloat(sval, 64) -} - -// GetFloat64 return the float64 value of a flag with the given name -func (f *FlagSet) GetFloat64(name string) (float64, error) { - val, err := f.getFlagType(name, "float64", float64Conv) - if err != nil { - return 0, err - } - return val.(float64), nil -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. 
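
The generated slice flags expose both a typed accessor and the SliceValue interface from flag.go. A sketch using Float32Slice with hypothetical values (the float64 flag family continues below):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	weights := fs.Float32Slice("weights", []float32{1}, "comma-separated weights")
	_ = fs.Parse([]string{"--weights=0.5,2.5"})
	fmt.Println(*weights) // [0.5 2.5]

	// Slice flags also satisfy SliceValue, so values can be appended later.
	if sv, ok := fs.Lookup("weights").Value.(flag.SliceValue); ok {
		_ = sv.Append("3")
		fmt.Println(sv.GetSlice()) // rendered with %f: [0.500000 2.500000 3.000000]
	}
}
```
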
-func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, name string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, "", value, usage) - return p -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, shorthand, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func Float64(name string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, "", value, usage) -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func Float64P(name, shorthand string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go deleted file mode 100644 index 85bf3073d5..0000000000 --- a/vendor/github.com/spf13/pflag/float64_slice.go +++ /dev/null @@ -1,166 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- float64Slice Value -type float64SliceValue struct { - value *[]float64 - changed bool -} - -func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue { - isv := new(float64SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *float64SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]float64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseFloat(d, 64) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - s.changed = true - return nil -} - -func (s *float64SliceValue) Type() string { - return "float64Slice" -} - -func (s *float64SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%f", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *float64SliceValue) fromString(val string) (float64, error) { - return strconv.ParseFloat(val, 64) -} - -func (s *float64SliceValue) toString(val float64) string { - return fmt.Sprintf("%f", val) -} - -func (s *float64SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *float64SliceValue) Replace(val []string) error { - out := make([]float64, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *float64SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func float64SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []float64{}, nil - } - ss := strings.Split(val, ",") - out := make([]float64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseFloat(d, 64) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetFloat64Slice return the []float64 value of a flag with the given name -func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) { - val, err := f.getFlagType(name, "float64Slice", float64SliceConv) - if err != nil { - return []float64{}, err - } - return val.([]float64), nil -} - -// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string. -// The argument p points to a []float64 variable in which to store the value of the flag. -func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) { - f.VarP(newFloat64SliceValue(value, p), name, "", usage) -} - -// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { - f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) -} - -// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string. -// The argument p points to a float64[] variable in which to store the value of the flag. -func Float64SliceVar(p *[]float64, name string, value []float64, usage string) { - CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage) -} - -// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { - CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) -} - -// Float64Slice defines a []float64 flag with specified name, default value, and usage string. -// The return value is the address of a []float64 variable that stores the value of the flag. 
-func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 { - p := []float64{} - f.Float64SliceVarP(&p, name, "", value, usage) - return &p -} - -// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { - p := []float64{} - f.Float64SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Float64Slice defines a []float64 flag with specified name, default value, and usage string. -// The return value is the address of a []float64 variable that stores the value of the flag. -func Float64Slice(name string, value []float64, usage string) *[]float64 { - return CommandLine.Float64SliceP(name, "", value, usage) -} - -// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. -func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { - return CommandLine.Float64SliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go deleted file mode 100644 index d3dd72b7fe..0000000000 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - goflag "flag" - "reflect" - "strings" -) - -// flagValueWrapper implements pflag.Value around a flag.Value. The main -// difference here is the addition of the Type method that returns a string -// name of the type. As this is generally unknown, we approximate that with -// reflection. -type flagValueWrapper struct { - inner goflag.Value - flagType string -} - -// We are just copying the boolFlag interface out of goflag as that is what -// they use to decide if a flag should get "true" when no arg is given. -type goBoolFlag interface { - goflag.Value - IsBoolFlag() bool -} - -func wrapFlagValue(v goflag.Value) Value { - // If the flag.Value happens to also be a pflag.Value, just use it directly. - if pv, ok := v.(Value); ok { - return pv - } - - pv := &flagValueWrapper{ - inner: v, - } - - t := reflect.TypeOf(v) - if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr { - t = t.Elem() - } - - pv.flagType = strings.TrimSuffix(t.Name(), "Value") - return pv -} - -func (v *flagValueWrapper) String() string { - return v.inner.String() -} - -func (v *flagValueWrapper) Set(s string) error { - return v.inner.Set(s) -} - -func (v *flagValueWrapper) Type() string { - return v.flagType -} - -// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag -// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessiblei -// with both `-v` and `--v` in flags. If the golang flag was more than a single -// character (ex: `verbose`) it will only be accessible via `--verbose` -func PFlagFromGoFlag(goflag *goflag.Flag) *Flag { - // Remember the default value as a string; it won't change. 
- flag := &Flag{ - Name: goflag.Name, - Usage: goflag.Usage, - Value: wrapFlagValue(goflag.Value), - // Looks like golang flags don't set DefValue correctly :-( - //DefValue: goflag.DefValue, - DefValue: goflag.Value.String(), - } - // Ex: if the golang flag was -v, allow both -v and --v to work - if len(flag.Name) == 1 { - flag.Shorthand = flag.Name - } - if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { - flag.NoOptDefVal = "true" - } - return flag -} - -// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet -func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { - if f.Lookup(goflag.Name) != nil { - return - } - newflag := PFlagFromGoFlag(goflag) - f.AddFlag(newflag) -} - -// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet -func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(goflag *goflag.Flag) { - f.AddGoFlag(goflag) - }) - if f.addedGoFlagSets == nil { - f.addedGoFlagSets = make([]*goflag.FlagSet, 0) - } - f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) -} diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go deleted file mode 100644 index 1474b89df6..0000000000 --- a/vendor/github.com/spf13/pflag/int.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Type() string { - return "int" -} - -func (i *intValue) String() string { return strconv.Itoa(int(*i)) } - -func intConv(sval string) (interface{}, error) { - return strconv.Atoi(sval) -} - -// GetInt return the int value of a flag with the given name -func (f *FlagSet) GetInt(name string) (int, error) { - val, err := f.getFlagType(name, "int", intConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { - f.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { - f.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, name string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func IntVarP(p *int, name, shorthand string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (f *FlagSet) Int(name string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, "", value, usage) - return p -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. 
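
AddGoFlagSet above is the interop bridge for absorbing standard-library flag sets, wrapping each go flag via PFlagFromGoFlag. A sketch (the body of IntP follows below):

```go
package main

import (
	goflag "flag"
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	gfs := goflag.NewFlagSet("legacy", goflag.ContinueOnError)
	legacy := gfs.Bool("legacy-mode", false, "declared with the standard library")

	pfs := flag.NewFlagSet("demo", flag.ContinueOnError)
	pfs.AddGoFlagSet(gfs) // bool flags keep NoOptDefVal = "true", so a bare flag works

	_ = pfs.Parse([]string{"--legacy-mode"})
	fmt.Println(*legacy) // true
}
```
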
-func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, shorthand, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(name string, value int, usage string) *int { - return CommandLine.IntP(name, "", value, usage) -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. -func IntP(name, shorthand string, value int, usage string) *int { - return CommandLine.IntP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go deleted file mode 100644 index f1a01d05e6..0000000000 --- a/vendor/github.com/spf13/pflag/int16.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int16 Value -type int16Value int16 - -func newInt16Value(val int16, p *int16) *int16Value { - *p = val - return (*int16Value)(p) -} - -func (i *int16Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 16) - *i = int16Value(v) - return err -} - -func (i *int16Value) Type() string { - return "int16" -} - -func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int16Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 16) - if err != nil { - return 0, err - } - return int16(v), nil -} - -// GetInt16 returns the int16 value of a flag with the given name -func (f *FlagSet) GetInt16(name string) (int16, error) { - val, err := f.getFlagType(name, "int16", int16Conv) - if err != nil { - return 0, err - } - return val.(int16), nil -} - -// Int16Var defines an int16 flag with specified name, default value, and usage string. -// The argument p points to an int16 variable in which to store the value of the flag. -func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) { - f.VarP(newInt16Value(value, p), name, "", usage) -} - -// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) { - f.VarP(newInt16Value(value, p), name, shorthand, usage) -} - -// Int16Var defines an int16 flag with specified name, default value, and usage string. -// The argument p points to an int16 variable in which to store the value of the flag. -func Int16Var(p *int16, name string, value int16, usage string) { - CommandLine.VarP(newInt16Value(value, p), name, "", usage) -} - -// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. -func Int16VarP(p *int16, name, shorthand string, value int16, usage string) { - CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage) -} - -// Int16 defines an int16 flag with specified name, default value, and usage string. -// The return value is the address of an int16 variable that stores the value of the flag. -func (f *FlagSet) Int16(name string, value int16, usage string) *int16 { - p := new(int16) - f.Int16VarP(p, name, "", value, usage) - return p -} - -// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. 
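
The typed getters route through getFlagType, shown earlier, which checks Value.Type before converting; a mismatched type is reported rather than silently coerced. A sketch (the body of Int16P follows below):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Int("port", 8080, "listen port")
	_ = fs.Parse([]string{"--port=9090"})

	port, err := fs.GetInt("port")
	fmt.Println(port, err) // 9090 <nil>

	_, err = fs.GetString("port")
	fmt.Println(err) // trying to get string value of flag of type int
}
```
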
-func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 { - p := new(int16) - f.Int16VarP(p, name, shorthand, value, usage) - return p -} - -// Int16 defines an int16 flag with specified name, default value, and usage string. -// The return value is the address of an int16 variable that stores the value of the flag. -func Int16(name string, value int16, usage string) *int16 { - return CommandLine.Int16P(name, "", value, usage) -} - -// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. -func Int16P(name, shorthand string, value int16, usage string) *int16 { - return CommandLine.Int16P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go deleted file mode 100644 index 9b95944f0f..0000000000 --- a/vendor/github.com/spf13/pflag/int32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int32 Value -type int32Value int32 - -func newInt32Value(val int32, p *int32) *int32Value { - *p = val - return (*int32Value)(p) -} - -func (i *int32Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 32) - *i = int32Value(v) - return err -} - -func (i *int32Value) Type() string { - return "int32" -} - -func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 32) - if err != nil { - return 0, err - } - return int32(v), nil -} - -// GetInt32 return the int32 value of a flag with the given name -func (f *FlagSet) GetInt32(name string) (int32, error) { - val, err := f.getFlagType(name, "int32", int32Conv) - if err != nil { - return 0, err - } - return val.(int32), nil -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func Int32Var(p *int32, name string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, "", value, usage) - return p -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, shorthand, value, usage) - return p -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func Int32(name string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, "", value, usage) -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. -func Int32P(name, shorthand string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go deleted file mode 100644 index ff128ff06d..0000000000 --- a/vendor/github.com/spf13/pflag/int32_slice.go +++ /dev/null @@ -1,174 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- int32Slice Value -type int32SliceValue struct { - value *[]int32 - changed bool -} - -func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue { - isv := new(int32SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *int32SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int32, len(ss)) - for i, d := range ss { - var err error - var temp64 int64 - temp64, err = strconv.ParseInt(d, 0, 32) - if err != nil { - return err - } - out[i] = int32(temp64) - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *int32SliceValue) Type() string { - return "int32Slice" -} - -func (s *int32SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *int32SliceValue) fromString(val string) (int32, error) { - t64, err := strconv.ParseInt(val, 0, 32) - if err != nil { - return 0, err - } - return int32(t64), nil -} - -func (s *int32SliceValue) toString(val int32) string { - return fmt.Sprintf("%d", val) -} - -func (s *int32SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *int32SliceValue) Replace(val []string) error { - out := make([]int32, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *int32SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func int32SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int32{}, nil - } - ss := strings.Split(val, ",") - out := make([]int32, len(ss)) - for i, d := range ss { - var err error - var temp64 int64 - temp64, err = strconv.ParseInt(d, 0, 32) - if err != nil { - return nil, err - } - out[i] = int32(temp64) - - } - return out, nil -} - -// GetInt32Slice return the []int32 value of a flag with the given name -func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) { - val, err := f.getFlagType(name, "int32Slice", int32SliceConv) - if err != nil { - return []int32{}, err - } - return val.([]int32), nil -} - -// Int32SliceVar 
defines a int32Slice flag with specified name, default value, and usage string. -// The argument p points to a []int32 variable in which to store the value of the flag. -func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) { - f.VarP(newInt32SliceValue(value, p), name, "", usage) -} - -// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { - f.VarP(newInt32SliceValue(value, p), name, shorthand, usage) -} - -// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string. -// The argument p points to a int32[] variable in which to store the value of the flag. -func Int32SliceVar(p *[]int32, name string, value []int32, usage string) { - CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage) -} - -// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { - CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage) -} - -// Int32Slice defines a []int32 flag with specified name, default value, and usage string. -// The return value is the address of a []int32 variable that stores the value of the flag. -func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 { - p := []int32{} - f.Int32SliceVarP(&p, name, "", value, usage) - return &p -} - -// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { - p := []int32{} - f.Int32SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Int32Slice defines a []int32 flag with specified name, default value, and usage string. -// The return value is the address of a []int32 variable that stores the value of the flag. -func Int32Slice(name string, value []int32, usage string) *[]int32 { - return CommandLine.Int32SliceP(name, "", value, usage) -} - -// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. -func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { - return CommandLine.Int32SliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go deleted file mode 100644 index 0026d781d9..0000000000 --- a/vendor/github.com/spf13/pflag/int64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Type() string { - return "int64" -} - -func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int64Conv(sval string) (interface{}, error) { - return strconv.ParseInt(sval, 0, 64) -} - -// GetInt64 return the int64 value of a flag with the given name -func (f *FlagSet) GetInt64(name string) (int64, error) { - val, err := f.getFlagType(name, "int64", int64Conv) - if err != nil { - return 0, err - } - return val.(int64), nil -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. 
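
For reference, a minimal sketch of how the sized-integer helpers deleted above (Int32P, Int32SliceVar, GetInt32Slice) are used. This assumes spf13/pflag remains available through the module graph rather than the vendor tree; the flag names are purely illustrative:

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        fs := flag.NewFlagSet("demo", flag.ContinueOnError)

        // Pointer-returning form for a scalar sized-int flag with a shorthand.
        depth := fs.Int32P("depth", "d", 1, "recursion depth")

        // Var form for a comma-separated []int32 flag.
        var shards []int32
        fs.Int32SliceVar(&shards, "shards", []int32{0}, "shard ids")

        // Per the Set method above, the first use of a slice flag replaces
        // the default and later uses append.
        if err := fs.Parse([]string{"--depth=3", "--shards=1,2", "--shards=3"}); err != nil {
            panic(err)
        }
        fmt.Println(*depth, shards) // 3 [1 2 3]
    }

GetInt32Slice("shards") would return the same []int32, with the lookup checked against the flag's declared Type().
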
-// The argument p points to an int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, name string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, "", value, usage) - return p -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, shorthand, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func Int64(name string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, "", value, usage) -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. -func Int64P(name, shorthand string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go deleted file mode 100644 index 25464638f3..0000000000 --- a/vendor/github.com/spf13/pflag/int64_slice.go +++ /dev/null @@ -1,166 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- int64Slice Value -type int64SliceValue struct { - value *[]int64 - changed bool -} - -func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue { - isv := new(int64SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *int64SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseInt(d, 0, 64) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - s.changed = true - return nil -} - -func (s *int64SliceValue) Type() string { - return "int64Slice" -} - -func (s *int64SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *int64SliceValue) fromString(val string) (int64, error) { - return strconv.ParseInt(val, 0, 64) -} - -func (s *int64SliceValue) toString(val int64) string { - return fmt.Sprintf("%d", val) -} - -func (s *int64SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *int64SliceValue) Replace(val []string) error { - out := make([]int64, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *int64SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func int64SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int64{}, nil - } - ss := strings.Split(val, ",") - out := make([]int64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseInt(d, 0, 64) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetInt64Slice return the []int64 value of a flag with the given name -func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) { - val, err := f.getFlagType(name, "int64Slice", int64SliceConv) - if err != nil { - return []int64{}, err - } - return val.([]int64), nil -} - -// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string. -// The argument p points to a []int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) { - f.VarP(newInt64SliceValue(value, p), name, "", usage) -} - -// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { - f.VarP(newInt64SliceValue(value, p), name, shorthand, usage) -} - -// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string. -// The argument p points to a int64[] variable in which to store the value of the flag. -func Int64SliceVar(p *[]int64, name string, value []int64, usage string) { - CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage) -} - -// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { - CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage) -} - -// Int64Slice defines a []int64 flag with specified name, default value, and usage string. -// The return value is the address of a []int64 variable that stores the value of the flag. -func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 { - p := []int64{} - f.Int64SliceVarP(&p, name, "", value, usage) - return &p -} - -// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { - p := []int64{} - f.Int64SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Int64Slice defines a []int64 flag with specified name, default value, and usage string. -// The return value is the address of a []int64 variable that stores the value of the flag. -func Int64Slice(name string, value []int64, usage string) *[]int64 { - return CommandLine.Int64SliceP(name, "", value, usage) -} - -// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. -func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { - return CommandLine.Int64SliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go deleted file mode 100644 index 4da92228e6..0000000000 --- a/vendor/github.com/spf13/pflag/int8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int8 Value -type int8Value int8 - -func newInt8Value(val int8, p *int8) *int8Value { - *p = val - return (*int8Value)(p) -} - -func (i *int8Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 8) - *i = int8Value(v) - return err -} - -func (i *int8Value) Type() string { - return "int8" -} - -func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 8) - if err != nil { - return 0, err - } - return int8(v), nil -} - -// GetInt8 return the int8 value of a flag with the given name -func (f *FlagSet) GetInt8(name string) (int8, error) { - val, err := f.getFlagType(name, "int8", int8Conv) - if err != nil { - return 0, err - } - return val.(int8), nil -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func Int8Var(p *int8, name string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, "", value, usage) - return p -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, shorthand, value, usage) - return p -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func Int8(name string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, "", value, usage) -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. -func Int8P(name, shorthand string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go deleted file mode 100644 index e71c39d91a..0000000000 --- a/vendor/github.com/spf13/pflag/int_slice.go +++ /dev/null @@ -1,158 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- intSlice Value -type intSliceValue struct { - value *[]int - changed bool -} - -func newIntSliceValue(val []int, p *[]int) *intSliceValue { - isv := new(intSliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *intSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *intSliceValue) Type() string { - return "intSlice" -} - -func (s *intSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *intSliceValue) Append(val string) error { - i, err := strconv.Atoi(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *intSliceValue) Replace(val []string) error { - out := make([]int, len(val)) - for i, d := range val { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *intSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = strconv.Itoa(d) - } - return out -} - -func intSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int{}, nil - } - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetIntSlice return the []int value of a flag with the given name -func (f *FlagSet) GetIntSlice(name string) ([]int, error) { - val, err := f.getFlagType(name, "intSlice", intSliceConv) - if err != nil { - return []int{}, err - } - return val.([]int), nil -} - -// IntSliceVar defines a intSlice flag with specified name, default value, and usage string. -// The argument p points to a []int variable in which to store the value of the flag. -func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSliceVar defines a int[] flag with specified name, default value, and usage string. -// The argument p points to a int[] variable in which to store the value of the flag. -func IntSliceVar(p *[]int, name string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, "", value, usage) - return &p -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func IntSlice(name string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, "", value, usage) -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. -func IntSliceP(name, shorthand string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go deleted file mode 100644 index 3d414ba69f..0000000000 --- a/vendor/github.com/spf13/pflag/ip.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// -- net.IP value -type ipValue net.IP - -func newIPValue(val net.IP, p *net.IP) *ipValue { - *p = val - return (*ipValue)(p) -} - -func (i *ipValue) String() string { return net.IP(*i).String() } -func (i *ipValue) Set(s string) error { - ip := net.ParseIP(strings.TrimSpace(s)) - if ip == nil { - return fmt.Errorf("failed to parse IP: %q", s) - } - *i = ipValue(ip) - return nil -} - -func (i *ipValue) Type() string { - return "ip" -} - -func ipConv(sval string) (interface{}, error) { - ip := net.ParseIP(sval) - if ip != nil { - return ip, nil - } - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) -} - -// GetIP return the net.IP value of a flag with the given name -func (f *FlagSet) GetIP(name string) (net.IP, error) { - val, err := f.getFlagType(name, "ip", ipConv) - if err != nil { - return nil, err - } - return val.(net.IP), nil -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func IPVar(p *net.IP, name string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. -func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, "", value, usage) - return p -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, shorthand, value, usage) - return p -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func IP(name string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, "", value, usage) -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. -func IPP(name, shorthand string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go deleted file mode 100644 index 775faae4fd..0000000000 --- a/vendor/github.com/spf13/pflag/ip_slice.go +++ /dev/null @@ -1,186 +0,0 @@ -package pflag - -import ( - "fmt" - "io" - "net" - "strings" -) - -// -- ipSlice Value -type ipSliceValue struct { - value *[]net.IP - changed bool -} - -func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue { - ipsv := new(ipSliceValue) - ipsv.value = p - *ipsv.value = val - return ipsv -} - -// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag. -// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended. -func (s *ipSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - ipStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse ip values into slice - out := make([]net.IP, 0, len(ipStrSlice)) - for _, ipStr := range ipStrSlice { - ip := net.ParseIP(strings.TrimSpace(ipStr)) - if ip == nil { - return fmt.Errorf("invalid string being converted to IP address: %s", ipStr) - } - out = append(out, ip) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *ipSliceValue) Type() string { - return "ipSlice" -} - -// String defines a "native" format for this net.IP slice flag value. 
-func (s *ipSliceValue) String() string { - - ipStrSlice := make([]string, len(*s.value)) - for i, ip := range *s.value { - ipStrSlice[i] = ip.String() - } - - out, _ := writeAsCSV(ipStrSlice) - - return "[" + out + "]" -} - -func (s *ipSliceValue) fromString(val string) (net.IP, error) { - return net.ParseIP(strings.TrimSpace(val)), nil -} - -func (s *ipSliceValue) toString(val net.IP) string { - return val.String() -} - -func (s *ipSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *ipSliceValue) Replace(val []string) error { - out := make([]net.IP, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *ipSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func ipSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []net.IP{}, nil - } - ss := strings.Split(val, ",") - out := make([]net.IP, len(ss)) - for i, sval := range ss { - ip := net.ParseIP(strings.TrimSpace(sval)) - if ip == nil { - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) - } - out[i] = ip - } - return out, nil -} - -// GetIPSlice returns the []net.IP value of a flag with the given name -func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) { - val, err := f.getFlagType(name, "ipSlice", ipSliceConv) - if err != nil { - return []net.IP{}, err - } - return val.([]net.IP), nil -} - -// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. -func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of that flag. -func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, "", value, usage) - return &p -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of the flag. -func IPSlice(name string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, "", value, usage) -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. -func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go deleted file mode 100644 index 5bd44bd21d..0000000000 --- a/vendor/github.com/spf13/pflag/ipmask.go +++ /dev/null @@ -1,122 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strconv" -) - -// -- net.IPMask value -type ipMaskValue net.IPMask - -func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { - *p = val - return (*ipMaskValue)(p) -} - -func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } -func (i *ipMaskValue) Set(s string) error { - ip := ParseIPv4Mask(s) - if ip == nil { - return fmt.Errorf("failed to parse IP mask: %q", s) - } - *i = ipMaskValue(ip) - return nil -} - -func (i *ipMaskValue) Type() string { - return "ipMask" -} - -// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). -// This function should really belong to the net package. -func ParseIPv4Mask(s string) net.IPMask { - mask := net.ParseIP(s) - if mask == nil { - if len(s) != 8 { - return nil - } - // net.IPMask.String() actually outputs things like ffffff00 - // so write a horrible parser for that as well :-( - m := []int{} - for i := 0; i < 4; i++ { - b := "0x" + s[2*i:2*i+2] - d, err := strconv.ParseInt(b, 0, 0) - if err != nil { - return nil - } - m = append(m, int(d)) - } - s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) - mask = net.ParseIP(s) - if mask == nil { - return nil - } - } - return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) -} - -func parseIPv4Mask(sval string) (interface{}, error) { - mask := ParseIPv4Mask(sval) - if mask == nil { - return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) - } - return mask, nil -} - -// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name -func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { - val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) - if err != nil { - return nil, err - } - return val.(net.IPMask), nil -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. 
-func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, "", value, usage) - return p -} - -// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, shorthand, value, usage) - return p -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func IPMask(name string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, "", value, usage) -} - -// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. -func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go deleted file mode 100644 index e2c1b8bcd5..0000000000 --- a/vendor/github.com/spf13/pflag/ipnet.go +++ /dev/null @@ -1,98 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// IPNet adapts net.IPNet for use as a flag. -type ipNetValue net.IPNet - -func (ipnet ipNetValue) String() string { - n := net.IPNet(ipnet) - return n.String() -} - -func (ipnet *ipNetValue) Set(value string) error { - _, n, err := net.ParseCIDR(strings.TrimSpace(value)) - if err != nil { - return err - } - *ipnet = ipNetValue(*n) - return nil -} - -func (*ipNetValue) Type() string { - return "ipNet" -} - -func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { - *p = val - return (*ipNetValue)(p) -} - -func ipNetConv(sval string) (interface{}, error) { - _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) - if err == nil { - return *n, nil - } - return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) -} - -// GetIPNet return the net.IPNet value of a flag with the given name -func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { - val, err := f.getFlagType(name, "ipNet", ipNetConv) - if err != nil { - return net.IPNet{}, err - } - return val.(net.IPNet), nil -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. -func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. -func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, "", value, usage) - return p -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, shorthand, value, usage) - return p -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. -func IPNet(name string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, "", value, usage) -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go deleted file mode 100644 index 04e0a26ff7..0000000000 --- a/vendor/github.com/spf13/pflag/string.go +++ /dev/null @@ -1,80 +0,0 @@ -package pflag - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} -func (s *stringValue) Type() string { - return "string" -} - -func (s *stringValue) String() string { return string(*s) } - -func stringConv(sval string) (interface{}, error) { - return sval, nil -} - -// GetString return the string value of a flag with the given name -func (f *FlagSet) GetString(name string) (string, error) { - val, err := f.getFlagType(name, "string", stringConv) - if err != nil { - return "", err - } - return val.(string), nil -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { - f.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { - f.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. 
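
Likewise for the net-typed helpers removed above (ip.go, ip_slice.go, ipmask.go, ipnet.go): a small, self-contained sketch under the same assumption that pflag now comes from the module graph; the addresses and flag names are made up:

    package main

    import (
        "fmt"
        "net"

        flag "github.com/spf13/pflag"
    )

    func main() {
        fs := flag.NewFlagSet("net-demo", flag.ContinueOnError)

        bind := fs.IP("bind", net.ParseIP("127.0.0.1"), "address to bind")
        allow := fs.IPSlice("allow", nil, "allowed peer addresses")
        subnet := fs.IPNet("subnet", net.IPNet{}, "subnet to advertise (CIDR)")

        err := fs.Parse([]string{
            "--bind=0.0.0.0",
            "--allow=10.0.0.1,10.0.0.2",
            "--subnet=192.168.0.0/24",
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(*bind, *allow, subnet.String())

        // ParseIPv4Mask accepts dotted-quad form as well as the hex form
        // that net.IPMask.String() prints (e.g. "ffffff00").
        fmt.Println(flag.ParseIPv4Mask("255.255.255.0"))
    }
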
-// The argument p points to a string variable in which to store the value of the flag. -func StringVar(p *string, name string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func StringVarP(p *string, name, shorthand string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (f *FlagSet) String(name string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, "", value, usage) - return p -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, shorthand, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(name string, value string, usage string) *string { - return CommandLine.StringP(name, "", value, usage) -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. -func StringP(name, shorthand string, value string, usage string) *string { - return CommandLine.StringP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go deleted file mode 100644 index 4894af8180..0000000000 --- a/vendor/github.com/spf13/pflag/string_array.go +++ /dev/null @@ -1,129 +0,0 @@ -package pflag - -// -- stringArray Value -type stringArrayValue struct { - value *[]string - changed bool -} - -func newStringArrayValue(val []string, p *[]string) *stringArrayValue { - ssv := new(stringArrayValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func (s *stringArrayValue) Set(val string) error { - if !s.changed { - *s.value = []string{val} - s.changed = true - } else { - *s.value = append(*s.value, val) - } - return nil -} - -func (s *stringArrayValue) Append(val string) error { - *s.value = append(*s.value, val) - return nil -} - -func (s *stringArrayValue) Replace(val []string) error { - out := make([]string, len(val)) - for i, d := range val { - var err error - out[i] = d - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *stringArrayValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = d - } - return out -} - -func (s *stringArrayValue) Type() string { - return "stringArray" -} - -func (s *stringArrayValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func stringArrayConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a array with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringArray return the []string value of a flag with the given name -func (f *FlagSet) GetStringArray(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringArray", stringArrayConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringArrayVar 
defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArrayVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func StringArrayVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, "", value, usage) - return &p -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func StringArray(name string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, "", value, usage) -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. 
-func StringArrayP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go deleted file mode 100644 index 3cb2e69dba..0000000000 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ /dev/null @@ -1,163 +0,0 @@ -package pflag - -import ( - "bytes" - "encoding/csv" - "strings" -) - -// -- stringSlice Value -type stringSliceValue struct { - value *[]string - changed bool -} - -func newStringSliceValue(val []string, p *[]string) *stringSliceValue { - ssv := new(stringSliceValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func readAsCSV(val string) ([]string, error) { - if val == "" { - return []string{}, nil - } - stringReader := strings.NewReader(val) - csvReader := csv.NewReader(stringReader) - return csvReader.Read() -} - -func writeAsCSV(vals []string) (string, error) { - b := &bytes.Buffer{} - w := csv.NewWriter(b) - err := w.Write(vals) - if err != nil { - return "", err - } - w.Flush() - return strings.TrimSuffix(b.String(), "\n"), nil -} - -func (s *stringSliceValue) Set(val string) error { - v, err := readAsCSV(val) - if err != nil { - return err - } - if !s.changed { - *s.value = v - } else { - *s.value = append(*s.value, v...) - } - s.changed = true - return nil -} - -func (s *stringSliceValue) Type() string { - return "stringSlice" -} - -func (s *stringSliceValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func (s *stringSliceValue) Append(val string) error { - *s.value = append(*s.value, val) - return nil -} - -func (s *stringSliceValue) Replace(val []string) error { - *s.value = val - return nil -} - -func (s *stringSliceValue) GetSlice() []string { - return *s.value -} - -func stringSliceConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a slice with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringSlice return the []string value of a flag with the given name -func (f *FlagSet) GetStringSlice(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringSlice", stringSliceConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
-// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func StringSliceVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, "", value, usage) - return &p -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func StringSlice(name string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, "", value, usage) -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. 
-func StringSliceP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go deleted file mode 100644 index 5ceda3965d..0000000000 --- a/vendor/github.com/spf13/pflag/string_to_int.go +++ /dev/null @@ -1,149 +0,0 @@ -package pflag - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -// -- stringToInt Value -type stringToIntValue struct { - value *map[string]int - changed bool -} - -func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue { - ssv := new(stringToIntValue) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToIntValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make(map[string]int, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.Atoi(kv[1]) - if err != nil { - return err - } - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToIntValue) Type() string { - return "stringToInt" -} - -func (s *stringToIntValue) String() string { - var buf bytes.Buffer - i := 0 - for k, v := range *s.value { - if i > 0 { - buf.WriteRune(',') - } - buf.WriteString(k) - buf.WriteRune('=') - buf.WriteString(strconv.Itoa(v)) - i++ - } - return "[" + buf.String() + "]" -} - -func stringToIntConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]int{}, nil - } - ss := strings.Split(val, ",") - out := make(map[string]int, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.Atoi(kv[1]) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetStringToInt return the map[string]int value of a flag with the given name -func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) { - val, err := f.getFlagType(name, "stringToInt", stringToIntConv) - if err != nil { - return map[string]int{}, err - } - return val.(map[string]int), nil -} - -// StringToIntVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]int variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { - f.VarP(newStringToIntValue(value, p), name, "", usage) -} - -// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { - f.VarP(newStringToIntValue(value, p), name, shorthand, usage) -} - -// StringToIntVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]int variable in which to store the value of the flag. 
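
The doc comments in the string_array.go and string_slice.go removals above spell out the behavioral difference between the two flavors; a compact sketch of that distinction (hypothetical flag names, pflag assumed present as a module dependency):

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        fs := flag.NewFlagSet("strings-demo", flag.ContinueOnError)

        // StringSlice splits each occurrence on commas (CSV rules);
        // StringArray keeps each occurrence as a single element.
        ss := fs.StringSlice("ss", nil, "comma-split values")
        sa := fs.StringArray("sa", nil, "verbatim values")

        args := []string{"--ss=a,b", "--ss=c", "--sa=a,b", "--sa=c"}
        if err := fs.Parse(args); err != nil {
            panic(err)
        }
        fmt.Println(*ss) // [a b c]
        fmt.Println(*sa) // [a,b c]
    }
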
-// The value of each argument will not try to be separated by comma -func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { - CommandLine.VarP(newStringToIntValue(value, p), name, "", usage) -} - -// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. -func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { - CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage) -} - -// StringToInt defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int { - p := map[string]int{} - f.StringToIntVarP(&p, name, "", value, usage) - return &p -} - -// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { - p := map[string]int{} - f.StringToIntVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToInt defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToInt(name string, value map[string]int, usage string) *map[string]int { - return CommandLine.StringToIntP(name, "", value, usage) -} - -// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. 
-func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { - return CommandLine.StringToIntP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go deleted file mode 100644 index a807a04a0b..0000000000 --- a/vendor/github.com/spf13/pflag/string_to_int64.go +++ /dev/null @@ -1,149 +0,0 @@ -package pflag - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -// -- stringToInt64 Value -type stringToInt64Value struct { - value *map[string]int64 - changed bool -} - -func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value { - ssv := new(stringToInt64Value) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToInt64Value) Set(val string) error { - ss := strings.Split(val, ",") - out := make(map[string]int64, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) - if err != nil { - return err - } - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToInt64Value) Type() string { - return "stringToInt64" -} - -func (s *stringToInt64Value) String() string { - var buf bytes.Buffer - i := 0 - for k, v := range *s.value { - if i > 0 { - buf.WriteRune(',') - } - buf.WriteString(k) - buf.WriteRune('=') - buf.WriteString(strconv.FormatInt(v, 10)) - i++ - } - return "[" + buf.String() + "]" -} - -func stringToInt64Conv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]int64{}, nil - } - ss := strings.Split(val, ",") - out := make(map[string]int64, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetStringToInt64 return the map[string]int64 value of a flag with the given name -func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) { - val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv) - if err != nil { - return map[string]int64{}, err - } - return val.(map[string]int64), nil -} - -// StringToInt64Var defines a string flag with specified name, default value, and usage string. -// The argument p point64s to a map[string]int64 variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { - f.VarP(newStringToInt64Value(value, p), name, "", usage) -} - -// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { - f.VarP(newStringToInt64Value(value, p), name, shorthand, usage) -} - -// StringToInt64Var defines a string flag with specified name, default value, and usage string. -// The argument p point64s to a map[string]int64 variable in which to store the value of the flag. 
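
And for the map-valued flags being removed (string_to_int.go above, string_to_int64.go here), a sketch of the key=value syntax their Set methods implement; the flag name and values are illustrative:

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        fs := flag.NewFlagSet("map-demo", flag.ContinueOnError)

        // Each occurrence contributes comma-separated key=value pairs;
        // after the first occurrence, later ones merge into (and can
        // overwrite) the accumulated map.
        limits := fs.StringToInt("limit", map[string]int{"cpu": 1}, "per-resource limits")

        if err := fs.Parse([]string{"--limit=cpu=2,mem=512", "--limit=disk=10"}); err != nil {
            panic(err)
        }
        fmt.Println((*limits)["cpu"], (*limits)["mem"], (*limits)["disk"]) // 2 512 10
    }
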
-// The value of each argument will not try to be separated by comma -func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { - CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage) -} - -// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. -func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { - CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage) -} - -// StringToInt64 defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int64 variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { - p := map[string]int64{} - f.StringToInt64VarP(&p, name, "", value, usage) - return &p -} - -// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { - p := map[string]int64{} - f.StringToInt64VarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToInt64 defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int64 variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { - return CommandLine.StringToInt64P(name, "", value, usage) -} - -// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. 
-func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { - return CommandLine.StringToInt64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go deleted file mode 100644 index 890a01afc0..0000000000 --- a/vendor/github.com/spf13/pflag/string_to_string.go +++ /dev/null @@ -1,160 +0,0 @@ -package pflag - -import ( - "bytes" - "encoding/csv" - "fmt" - "strings" -) - -// -- stringToString Value -type stringToStringValue struct { - value *map[string]string - changed bool -} - -func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue { - ssv := new(stringToStringValue) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToStringValue) Set(val string) error { - var ss []string - n := strings.Count(val, "=") - switch n { - case 0: - return fmt.Errorf("%s must be formatted as key=value", val) - case 1: - ss = append(ss, strings.Trim(val, `"`)) - default: - r := csv.NewReader(strings.NewReader(val)) - var err error - ss, err = r.Read() - if err != nil { - return err - } - } - - out := make(map[string]string, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - out[kv[0]] = kv[1] - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToStringValue) Type() string { - return "stringToString" -} - -func (s *stringToStringValue) String() string { - records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { - records = append(records, k+"="+v) - } - - var buf bytes.Buffer - w := csv.NewWriter(&buf) - if err := w.Write(records); err != nil { - panic(err) - } - w.Flush() - return "[" + strings.TrimSpace(buf.String()) + "]" -} - -func stringToStringConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]string{}, nil - } - r := csv.NewReader(strings.NewReader(val)) - ss, err := r.Read() - if err != nil { - return nil, err - } - out := make(map[string]string, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - out[kv[0]] = kv[1] - } - return out, nil -} - -// GetStringToString return the map[string]string value of a flag with the given name -func (f *FlagSet) GetStringToString(name string) (map[string]string, error) { - val, err := f.getFlagType(name, "stringToString", stringToStringConv) - if err != nil { - return map[string]string{}, err - } - return val.(map[string]string), nil -} - -// StringToStringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { - f.VarP(newStringToStringValue(value, p), name, "", usage) -} - -// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { - f.VarP(newStringToStringValue(value, p), name, shorthand, usage) -} - -// StringToStringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]string variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { - CommandLine.VarP(newStringToStringValue(value, p), name, "", usage) -} - -// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. -func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { - CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage) -} - -// StringToString defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string { - p := map[string]string{} - f.StringToStringVarP(&p, name, "", value, usage) - return &p -} - -// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { - p := map[string]string{} - f.StringToStringVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToString defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToString(name string, value map[string]string, usage string) *map[string]string { - return CommandLine.StringToStringP(name, "", value, usage) -} - -// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. 
-func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { - return CommandLine.StringToStringP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go deleted file mode 100644 index dcbc2b758c..0000000000 --- a/vendor/github.com/spf13/pflag/uint.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Type() string { - return "uint" -} - -func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uintConv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 0) - if err != nil { - return 0, err - } - return uint(v), nil -} - -// GetUint return the uint value of a flag with the given name -func (f *FlagSet) GetUint(name string) (uint, error) { - val, err := f.getFlagType(name, "uint", uintConv) - if err != nil { - return 0, err - } - return val.(uint), nil -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func UintVar(p *uint, name string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func UintVarP(p *uint, name, shorthand string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint(name string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, "", value, usage) - return p -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, shorthand, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(name string, value uint, usage string) *uint { - return CommandLine.UintP(name, "", value, usage) -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. 
-func UintP(name, shorthand string, value uint, usage string) *uint { - return CommandLine.UintP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go deleted file mode 100644 index 7e9914eddd..0000000000 --- a/vendor/github.com/spf13/pflag/uint16.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint16 value -type uint16Value uint16 - -func newUint16Value(val uint16, p *uint16) *uint16Value { - *p = val - return (*uint16Value)(p) -} - -func (i *uint16Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 16) - *i = uint16Value(v) - return err -} - -func (i *uint16Value) Type() string { - return "uint16" -} - -func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint16Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 16) - if err != nil { - return 0, err - } - return uint16(v), nil -} - -// GetUint16 return the uint16 value of a flag with the given name -func (f *FlagSet) GetUint16(name string) (uint16, error) { - val, err := f.getFlagType(name, "uint16", uint16Conv) - if err != nil { - return 0, err - } - return val.(uint16), nil -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func Uint16Var(p *uint16, name string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, "", value, usage) - return p -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, shorthand, value, usage) - return p -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint16(name string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, "", value, usage) -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. 
-func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go deleted file mode 100644 index d8024539bf..0000000000 --- a/vendor/github.com/spf13/pflag/uint32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint32 value -type uint32Value uint32 - -func newUint32Value(val uint32, p *uint32) *uint32Value { - *p = val - return (*uint32Value)(p) -} - -func (i *uint32Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 32) - *i = uint32Value(v) - return err -} - -func (i *uint32Value) Type() string { - return "uint32" -} - -func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 32) - if err != nil { - return 0, err - } - return uint32(v), nil -} - -// GetUint32 return the uint32 value of a flag with the given name -func (f *FlagSet) GetUint32(name string) (uint32, error) { - val, err := f.getFlagType(name, "uint32", uint32Conv) - if err != nil { - return 0, err - } - return val.(uint32), nil -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func Uint32Var(p *uint32, name string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, "", value, usage) - return p -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, shorthand, value, usage) - return p -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func Uint32(name string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, "", value, usage) -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. 
-func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go deleted file mode 100644 index f62240f2ce..0000000000 --- a/vendor/github.com/spf13/pflag/uint64.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Type() string { - return "uint64" -} - -func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint64Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 64) - if err != nil { - return 0, err - } - return uint64(v), nil -} - -// GetUint64 return the uint64 value of a flag with the given name -func (f *FlagSet) GetUint64(name string) (uint64, error) { - val, err := f.getFlagType(name, "uint64", uint64Conv) - if err != nil { - return 0, err - } - return val.(uint64), nil -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, name string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, "", value, usage) - return p -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, shorthand, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(name string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, "", value, usage) -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. 
-func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go deleted file mode 100644 index bb0e83c1f6..0000000000 --- a/vendor/github.com/spf13/pflag/uint8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint8 Value -type uint8Value uint8 - -func newUint8Value(val uint8, p *uint8) *uint8Value { - *p = val - return (*uint8Value)(p) -} - -func (i *uint8Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 8) - *i = uint8Value(v) - return err -} - -func (i *uint8Value) Type() string { - return "uint8" -} - -func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 8) - if err != nil { - return 0, err - } - return uint8(v), nil -} - -// GetUint8 return the uint8 value of a flag with the given name -func (f *FlagSet) GetUint8(name string) (uint8, error) { - val, err := f.getFlagType(name, "uint8", uint8Conv) - if err != nil { - return 0, err - } - return val.(uint8), nil -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func Uint8Var(p *uint8, name string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, "", value, usage) - return p -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, shorthand, value, usage) - return p -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func Uint8(name string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, "", value, usage) -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. 
-func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go deleted file mode 100644 index 5fa924835e..0000000000 --- a/vendor/github.com/spf13/pflag/uint_slice.go +++ /dev/null @@ -1,168 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- uintSlice Value -type uintSliceValue struct { - value *[]uint - changed bool -} - -func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue { - uisv := new(uintSliceValue) - uisv.value = p - *uisv.value = val - return uisv -} - -func (s *uintSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return err - } - out[i] = uint(u) - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *uintSliceValue) Type() string { - return "uintSlice" -} - -func (s *uintSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *uintSliceValue) fromString(val string) (uint, error) { - t, err := strconv.ParseUint(val, 10, 0) - if err != nil { - return 0, err - } - return uint(t), nil -} - -func (s *uintSliceValue) toString(val uint) string { - return fmt.Sprintf("%d", val) -} - -func (s *uintSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *uintSliceValue) Replace(val []string) error { - out := make([]uint, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *uintSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func uintSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []uint{}, nil - } - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return nil, err - } - out[i] = uint(u) - } - return out, nil -} - -// GetUintSlice returns the []uint value of a flag with the given name. -func (f *FlagSet) GetUintSlice(name string) ([]uint, error) { - val, err := f.getFlagType(name, "uintSlice", uintSliceConv) - if err != nil { - return []uint{}, err - } - return val.([]uint), nil -} - -// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string. -// The argument p points to a []uint variable in which to store the value of the flag. -func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSliceVar defines a uint[] flag with specified name, default value, and usage string. 
-// The argument p points to a uint[] variable in which to store the value of the flag. -func UintSliceVar(p *[]uint, name string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, "", value, usage) - return &p -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func UintSlice(name string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, "", value, usage) -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/viper/.editorconfig b/vendor/github.com/spf13/viper/.editorconfig deleted file mode 100644 index 1f664d13a5..0000000000 --- a/vendor/github.com/spf13/viper/.editorconfig +++ /dev/null @@ -1,18 +0,0 @@ -root = true - -[*] -charset = utf-8 -end_of_line = lf -indent_size = 4 -indent_style = space -insert_final_newline = true -trim_trailing_whitespace = true - -[*.go] -indent_style = tab - -[{Makefile,*.mk}] -indent_style = tab - -[*.nix] -indent_size = 2 diff --git a/vendor/github.com/spf13/viper/.envrc b/vendor/github.com/spf13/viper/.envrc deleted file mode 100644 index 3ce7171a3c..0000000000 --- a/vendor/github.com/spf13/viper/.envrc +++ /dev/null @@ -1,4 +0,0 @@ -if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=" -fi -use flake . --impure diff --git a/vendor/github.com/spf13/viper/.gitignore b/vendor/github.com/spf13/viper/.gitignore deleted file mode 100644 index f1bbd42803..0000000000 --- a/vendor/github.com/spf13/viper/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -/.devenv/ -/.direnv/ -/.idea/ -/.pre-commit-config.yaml -/bin/ -/build/ -/var/ -/vendor/ diff --git a/vendor/github.com/spf13/viper/.golangci.yaml b/vendor/github.com/spf13/viper/.golangci.yaml deleted file mode 100644 index 1faeae42c7..0000000000 --- a/vendor/github.com/spf13/viper/.golangci.yaml +++ /dev/null @@ -1,108 +0,0 @@ -run: - timeout: 5m - -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/spf13/viper) - gocritic: - # Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage. 
- enabled-tags: - - diagnostic - - experimental - - opinionated - - style - disabled-checks: - - importShadow - - unnamedResult - golint: - min-confidence: 0 - goimports: - local-prefixes: github.com/spf13/viper - -linters: - disable-all: true - enable: - - bodyclose - - dogsled - - dupl - - durationcheck - - exhaustive - - exportloopref - - gci - - gocritic - - godot - - gofmt - - gofumpt - - goimports - - gomoddirectives - - goprintffuncname - - govet - - importas - - ineffassign - - makezero - - misspell - - nakedret - - nilerr - - noctx - - nolintlint - - prealloc - - predeclared - - revive - - rowserrcheck - - sqlclosecheck - - staticcheck - - stylecheck - - tparallel - - typecheck - - unconvert - - unparam - - unused - - wastedassign - - whitespace - - # fixme - # - cyclop - # - errcheck - # - errorlint - # - exhaustivestruct - # - forbidigo - # - forcetypeassert - # - gochecknoglobals - # - gochecknoinits - # - gocognit - # - goconst - # - gocyclo - # - gosec - # - gosimple - # - ifshort - # - lll - # - nlreturn - # - paralleltest - # - scopelint - # - thelper - # - wrapcheck - - # unused - # - depguard - # - goheader - # - gomodguard - - # deprecated - # - deadcode - # - structcheck - # - varcheck - - # don't enable: - # - asciicheck - # - funlen - # - godox - # - goerr113 - # - gomnd - # - interfacer - # - maligned - # - nestif - # - testpackage - # - wsl diff --git a/vendor/github.com/spf13/viper/.yamlignore b/vendor/github.com/spf13/viper/.yamlignore deleted file mode 100644 index c04c4dead1..0000000000 --- a/vendor/github.com/spf13/viper/.yamlignore +++ /dev/null @@ -1,2 +0,0 @@ -# TODO: FIXME -/.github/ diff --git a/vendor/github.com/spf13/viper/.yamllint.yaml b/vendor/github.com/spf13/viper/.yamllint.yaml deleted file mode 100644 index bac19ce18b..0000000000 --- a/vendor/github.com/spf13/viper/.yamllint.yaml +++ /dev/null @@ -1,6 +0,0 @@ -ignore-from-file: [.gitignore, .yamlignore] - -extends: default - -rules: - line-length: disable diff --git a/vendor/github.com/spf13/viper/LICENSE b/vendor/github.com/spf13/viper/LICENSE deleted file mode 100644 index 4527efb9c0..0000000000 --- a/vendor/github.com/spf13/viper/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/spf13/viper/Makefile b/vendor/github.com/spf13/viper/Makefile deleted file mode 100644 index a77b9c81c1..0000000000 --- a/vendor/github.com/spf13/viper/Makefile +++ /dev/null @@ -1,87 +0,0 @@ -# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html - -OS = $(shell uname | tr A-Z a-z) -export PATH := $(abspath bin/):${PATH} - -# Build variables -BUILD_DIR ?= build -export CGO_ENABLED ?= 0 -export GOOS = $(shell go env GOOS) -ifeq (${VERBOSE}, 1) -ifeq ($(filter -v,${GOARGS}),) - GOARGS += -v -endif -TEST_FORMAT = short-verbose -endif - -# Dependency versions -GOTESTSUM_VERSION = 1.9.0 -GOLANGCI_VERSION = 1.53.3 - -# Add the ability to override some variables -# Use with care --include override.mk - -.PHONY: clear -clear: ## Clear the working area and the project - rm -rf bin/ - -.PHONY: check -check: test lint ## Run tests and linters - - -TEST_PKGS ?= ./... -.PHONY: test -test: TEST_FORMAT ?= short -test: SHELL = /bin/bash -test: export CGO_ENABLED=1 -test: bin/gotestsum ## Run tests - @mkdir -p ${BUILD_DIR} - bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/coverage.xml --format ${TEST_FORMAT} -- -race -coverprofile=${BUILD_DIR}/coverage.txt -covermode=atomic $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...) - -.PHONY: lint -lint: lint-go lint-yaml -lint: ## Run linters - -.PHONY: lint-go -lint-go: - golangci-lint run $(if ${CI},--out-format github-actions,) - -.PHONY: lint-yaml -lint-yaml: - yamllint $(if ${CI},-f github,) --no-warnings . - -.PHONY: fmt -fmt: ## Format code - golangci-lint run --fix - -deps: bin/golangci-lint bin/gotestsum yamllint -deps: ## Install dependencies - -bin/gotestsum: - @mkdir -p bin - curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum && chmod +x ./bin/gotestsum - -bin/golangci-lint: - @mkdir -p bin - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | bash -s -- v${GOLANGCI_VERSION} - -.PHONY: yamllint -yamllint: - pip3 install --user yamllint - -# Add custom targets here --include custom.mk - -.PHONY: list -list: ## List all make targets - @${MAKE} -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort - -.PHONY: help -.DEFAULT_GOAL := help -help: - @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' - -# Variable outputting/exporting rules -var-%: ; @echo $($*) -varexport-%: ; @echo $*=$($*) diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md deleted file mode 100644 index b96180b3b9..0000000000 --- a/vendor/github.com/spf13/viper/README.md +++ /dev/null @@ -1,928 +0,0 @@ -> ## Viper v2 feedback -> Viper is heading towards v2 and we would love to hear what _**you**_ would like to see in it. 
Share your thoughts here: https://forms.gle/R6faU74qPRPAzchZ9 -> -> **Thank you!** - -![Viper](.github/logo.png?raw=true) - -[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) -[![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go) - -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) -[![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.19-61CFDD.svg?style=flat-square) -[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) - -**Go configuration with fangs!** - -Many Go projects are built using Viper including: - -* [Hugo](http://gohugo.io) -* [EMC RexRay](http://rexray.readthedocs.org/en/stable/) -* [Imgur’s Incus](https://github.com/Imgur/incus) -* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -* [Docker Notary](https://github.com/docker/Notary) -* [BloomApi](https://www.bloomapi.com/) -* [doctl](https://github.com/digitalocean/doctl) -* [Clairctl](https://github.com/jgsqware/clairctl) -* [Mercure](https://mercure.rocks) -* [Meshery](https://github.com/meshery/meshery) -* [Bearer](https://github.com/bearer/bearer) -* [Coder](https://github.com/coder/coder) -* [Vitess](https://vitess.io/) - - -## Install - -```shell -go get github.com/spf13/viper -``` - -**Note:** Viper uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies. - - -## What is Viper? - -Viper is a complete configuration solution for Go applications including [12-Factor apps](https://12factor.net/#the_twelve_factors). -It is designed to work within an application, and can handle all types of configuration needs -and formats. It supports: - -* setting defaults -* reading from JSON, TOML, YAML, HCL, envfile and Java properties config files -* live watching and re-reading of config files (optional) -* reading from environment variables -* reading from remote config systems (etcd or Consul), and watching changes -* reading from command line flags -* reading from buffer -* setting explicit values - -Viper can be thought of as a registry for all of your applications configuration needs. - - -## Why Viper? - -When building a modern application, you don’t want to worry about -configuration file formats; you want to focus on building awesome software. -Viper is here to help with that. - -Viper does the following for you: - -1. Find, load, and unmarshal a configuration file in JSON, TOML, YAML, HCL, INI, envfile or Java properties formats. -2. Provide a mechanism to set default values for your different configuration options. -3. Provide a mechanism to set override values for options specified through command line flags. -4. Provide an alias system to easily rename parameters without breaking existing code. -5. 
Make it easy to tell the difference between when a user has provided a command line or config file which is the same as the default. - -Viper uses the following precedence order. Each item takes precedence over the item below it: - - * explicit call to `Set` - * flag - * env - * config - * key/value store - * default - -**Important:** Viper configuration keys are case insensitive. -There are ongoing discussions about making that optional. - - -## Putting Values into Viper - -### Establishing Defaults - -A good configuration system will support default values. A default value is not -required for a key, but it’s useful in the event that a key hasn't been set via -config file, environment variable, remote configuration or flag. - -Examples: - -```go -viper.SetDefault("ContentDir", "content") -viper.SetDefault("LayoutDir", "layouts") -viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"}) -``` - -### Reading Config Files - -Viper requires minimal configuration so it knows where to look for config files. -Viper supports JSON, TOML, YAML, HCL, INI, envfile and Java Properties files. Viper can search multiple paths, but -currently a single Viper instance only supports a single configuration file. -Viper does not default to any configuration search paths leaving defaults decision -to an application. - -Here is an example of how to use Viper to search for and read a configuration file. -None of the specific paths are required, but at least one path should be provided -where a configuration file is expected. - -```go -viper.SetConfigName("config") // name of config file (without extension) -viper.SetConfigType("yaml") // REQUIRED if the config file does not have the extension in the name -viper.AddConfigPath("/etc/appname/") // path to look for the config file in -viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search paths -viper.AddConfigPath(".") // optionally look for config in the working directory -err := viper.ReadInConfig() // Find and read the config file -if err != nil { // Handle errors reading the config file - panic(fmt.Errorf("fatal error config file: %w", err)) -} -``` - -You can handle the specific case where no config file is found like this: - -```go -if err := viper.ReadInConfig(); err != nil { - if _, ok := err.(viper.ConfigFileNotFoundError); ok { - // Config file not found; ignore error if desired - } else { - // Config file was found but another error was produced - } -} - -// Config file found and successfully parsed -``` - -*NOTE [since 1.6]:* You can also have a file without an extension and specify the format programmatically. For those configuration files that lie in the home of the user without any extension like `.bashrc` - -### Writing Config Files - -Reading from config files is useful, but at times you want to store all modifications made at run time. -For that, a bunch of commands are available, each with its own purpose: - -* WriteConfig - writes the current viper configuration to the predefined path, if exists. Errors if no predefined path. Will overwrite the current config file, if it exists. -* SafeWriteConfig - writes the current viper configuration to the predefined path. Errors if no predefined path. Will not overwrite the current config file, if it exists. -* WriteConfigAs - writes the current viper configuration to the given filepath. Will overwrite the given file, if it exists. -* SafeWriteConfigAs - writes the current viper configuration to the given filepath. 
Will not overwrite the given file, if it exists. - -As a rule of the thumb, everything marked with safe won't overwrite any file, but just create if not existent, whilst the default behavior is to create or truncate. - -A small examples section: - -```go -viper.WriteConfig() // writes current config to predefined path set by 'viper.AddConfigPath()' and 'viper.SetConfigName' -viper.SafeWriteConfig() -viper.WriteConfigAs("/path/to/my/.config") -viper.SafeWriteConfigAs("/path/to/my/.config") // will error since it has already been written -viper.SafeWriteConfigAs("/path/to/my/.other_config") -``` - -### Watching and re-reading config files - -Viper supports the ability to have your application live read a config file while running. - -Gone are the days of needing to restart a server to have a config take effect, -viper powered applications can read an update to a config file while running and -not miss a beat. - -Simply tell the viper instance to watchConfig. -Optionally you can provide a function for Viper to run each time a change occurs. - -**Make sure you add all of the configPaths prior to calling `WatchConfig()`** - -```go -viper.OnConfigChange(func(e fsnotify.Event) { - fmt.Println("Config file changed:", e.Name) -}) -viper.WatchConfig() -``` - -### Reading Config from io.Reader - -Viper predefines many configuration sources such as files, environment -variables, flags, and remote K/V store, but you are not bound to them. You can -also implement your own required configuration source and feed it to viper. - -```go -viper.SetConfigType("yaml") // or viper.SetConfigType("YAML") - -// any approach to require this configuration into your program. -var yamlExample = []byte(` -Hacker: true -name: steve -hobbies: -- skateboarding -- snowboarding -- go -clothing: - jacket: leather - trousers: denim -age: 35 -eyes : brown -beard: true -`) - -viper.ReadConfig(bytes.NewBuffer(yamlExample)) - -viper.Get("name") // this would be "steve" -``` - -### Setting Overrides - -These could be from a command line flag, or from your own application logic. - -```go -viper.Set("Verbose", true) -viper.Set("LogFile", LogFile) -viper.Set("host.port", 5899) // set subset -``` - -### Registering and Using Aliases - -Aliases permit a single value to be referenced by multiple keys - -```go -viper.RegisterAlias("loud", "Verbose") - -viper.Set("verbose", true) // same result as next line -viper.Set("loud", true) // same result as prior line - -viper.GetBool("loud") // true -viper.GetBool("verbose") // true -``` - -### Working with Environment Variables - -Viper has full support for environment variables. This enables 12 factor -applications out of the box. There are five methods that exist to aid working -with ENV: - - * `AutomaticEnv()` - * `BindEnv(string...) : error` - * `SetEnvPrefix(string)` - * `SetEnvKeyReplacer(string...) *strings.Replacer` - * `AllowEmptyEnv(bool)` - -_When working with ENV variables, it’s important to recognize that Viper -treats ENV variables as case sensitive._ - -Viper provides a mechanism to try to ensure that ENV variables are unique. By -using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from -the environment variables. Both `BindEnv` and `AutomaticEnv` will use this -prefix. - -`BindEnv` takes one or more parameters. The first parameter is the key name, the -rest are the name of the environment variables to bind to this key. If more than -one are provided, they will take precedence in the specified order. The name of -the environment variable is case sensitive. 
If the ENV variable name is not provided, then -Viper will automatically assume that the ENV variable matches the following format: prefix + "_" + the key name in ALL CAPS. When you explicitly provide the ENV variable name (the second parameter), -it **does not** automatically add the prefix. For example if the second parameter is "id", -Viper will look for the ENV variable "ID". - -One important thing to recognize when working with ENV variables is that the -value will be read each time it is accessed. Viper does not fix the value when -the `BindEnv` is called. - -`AutomaticEnv` is a powerful helper especially when combined with -`SetEnvPrefix`. When called, Viper will check for an environment variable any -time a `viper.Get` request is made. It will apply the following rules. It will -check for an environment variable with a name matching the key uppercased and -prefixed with the `EnvPrefix` if set. - -`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env -keys to an extent. This is useful if you want to use `-` or something in your -`Get()` calls, but want your environmental variables to use `_` delimiters. An -example of using it can be found in `viper_test.go`. - -Alternatively, you can use `EnvKeyReplacer` with `NewWithOptions` factory function. -Unlike `SetEnvKeyReplacer`, it accepts a `StringReplacer` interface allowing you to write custom string replacing logic. - -By default empty environment variables are considered unset and will fall back to -the next configuration source. To treat empty environment variables as set, use -the `AllowEmptyEnv` method. - -#### Env example - -```go -SetEnvPrefix("spf") // will be uppercased automatically -BindEnv("id") - -os.Setenv("SPF_ID", "13") // typically done outside of the app - -id := Get("id") // 13 -``` - -### Working with Flags - -Viper has the ability to bind to flags. Specifically, Viper supports `Pflags` -as used in the [Cobra](https://github.com/spf13/cobra) library. - -Like `BindEnv`, the value is not set when the binding method is called, but when -it is accessed. This means you can bind as early as you want, even in an -`init()` function. - -For individual flags, the `BindPFlag()` method provides this functionality. - -Example: - -```go -serverCmd.Flags().Int("port", 1138, "Port to run Application server on") -viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) -``` - -You can also bind an existing set of pflags (pflag.FlagSet): - -Example: - -```go -pflag.Int("flagname", 1234, "help message for flagname") - -pflag.Parse() -viper.BindPFlags(pflag.CommandLine) - -i := viper.GetInt("flagname") // retrieve values from viper instead of pflag -``` - -The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude -the use of other packages that use the [flag](https://golang.org/pkg/flag/) -package from the standard library. The pflag package can handle the flags -defined for the flag package by importing these flags. This is accomplished -by a calling a convenience function provided by the pflag package called -AddGoFlagSet(). - -Example: - -```go -package main - -import ( - "flag" - "github.com/spf13/pflag" -) - -func main() { - - // using standard library "flag" package - flag.Int("flagname", 1234, "help message for flagname") - - pflag.CommandLine.AddGoFlagSet(flag.CommandLine) - pflag.Parse() - viper.BindPFlags(pflag.CommandLine) - - i := viper.GetInt("flagname") // retrieve value from viper - - // ... 
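	// Annotation (not in the original snippet): as written, this example also
	// needs "github.com/spf13/viper" in its import block for viper.BindPFlags
	// and viper.GetInt to compile. flag.Parse() is deliberately absent here:
	// once AddGoFlagSet has imported the stdlib flags, the single
	// pflag.Parse() call parses everything.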
-} -``` - -#### Flag interfaces - -Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`. - -`FlagValue` represents a single flag. This is a very simple example on how to implement this interface: - -```go -type myFlag struct {} -func (f myFlag) HasChanged() bool { return false } -func (f myFlag) Name() string { return "my-flag-name" } -func (f myFlag) ValueString() string { return "my-flag-value" } -func (f myFlag) ValueType() string { return "string" } -``` - -Once your flag implements this interface, you can simply tell Viper to bind it: - -```go -viper.BindFlagValue("my-flag-name", myFlag{}) -``` - -`FlagValueSet` represents a group of flags. This is a very simple example on how to implement this interface: - -```go -type myFlagSet struct { - flags []myFlag -} - -func (f myFlagSet) VisitAll(fn func(FlagValue)) { - for _, flag := range flags { - fn(flag) - } -} -``` - -Once your flag set implements this interface, you can simply tell Viper to bind it: - -```go -fSet := myFlagSet{ - flags: []myFlag{myFlag{}, myFlag{}}, -} -viper.BindFlagValues("my-flags", fSet) -``` - -### Remote Key/Value Store Support - -To enable remote support in Viper, do a blank import of the `viper/remote` -package: - -`import _ "github.com/spf13/viper/remote"` - -Viper will read a config string (as JSON, TOML, YAML, HCL or envfile) retrieved from a path -in a Key/Value store such as etcd or Consul. These values take precedence over -default values, but are overridden by configuration values retrieved from disk, -flags, or environment variables. - -Viper supports multiple hosts. To use, pass a list of endpoints separated by `;`. For example `http://127.0.0.1:4001;http://127.0.0.1:4002`. - -Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve -configuration from the K/V store, which means that you can store your -configuration values encrypted and have them automatically decrypted if you have -the correct gpg keyring. Encryption is optional. - -You can use remote configuration in conjunction with local configuration, or -independently of it. - -`crypt` has a command-line helper that you can use to put configurations in your -K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001. - -```bash -$ go get github.com/bketelsen/crypt/bin/crypt -$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json -``` - -Confirm that your value was set: - -```bash -$ crypt get -plaintext /config/hugo.json -``` - -See the `crypt` documentation for examples of how to set encrypted values, or -how to use Consul. - -### Remote Key/Value Store Example - Unencrypted - -#### etcd -```go -viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001","/config/hugo.json") -viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" -err := viper.ReadRemoteConfig() -``` - -#### etcd3 -```go -viper.AddRemoteProvider("etcd3", "http://127.0.0.1:4001","/config/hugo.json") -viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" -err := viper.ReadRemoteConfig() -``` - -#### Consul -You need to set a key to Consul key/value storage with JSON value containing your desired config. 
-For example, create a Consul key/value store key `MY_CONSUL_KEY` with value: - -```json -{ - "port": 8080, - "hostname": "myhostname.com" -} -``` - -```go -viper.AddRemoteProvider("consul", "localhost:8500", "MY_CONSUL_KEY") -viper.SetConfigType("json") // Need to explicitly set this to json -err := viper.ReadRemoteConfig() - -fmt.Println(viper.Get("port")) // 8080 -fmt.Println(viper.Get("hostname")) // myhostname.com -``` - -#### Firestore - -```go -viper.AddRemoteProvider("firestore", "google-cloud-project-id", "collection/document") -viper.SetConfigType("json") // Config's format: "json", "toml", "yaml", "yml" -err := viper.ReadRemoteConfig() -``` - -Of course, you're allowed to use `SecureRemoteProvider` also - - -#### NATS - -```go -viper.AddRemoteProvider("nats", "nats://127.0.0.1:4222", "myapp.config") -viper.SetConfigType("json") -err := viper.ReadRemoteConfig() -``` - -### Remote Key/Value Store Example - Encrypted - -```go -viper.AddSecureRemoteProvider("etcd","http://127.0.0.1:4001","/config/hugo.json","/etc/secrets/mykeyring.gpg") -viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" -err := viper.ReadRemoteConfig() -``` - -### Watching Changes in etcd - Unencrypted - -```go -// alternatively, you can create a new viper instance. -var runtime_viper = viper.New() - -runtime_viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.yml") -runtime_viper.SetConfigType("yaml") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" - -// read from remote config the first time. -err := runtime_viper.ReadRemoteConfig() - -// unmarshal config -runtime_viper.Unmarshal(&runtime_conf) - -// open a goroutine to watch remote changes forever -go func(){ - for { - time.Sleep(time.Second * 5) // delay after each request - - // currently, only tested with etcd support - err := runtime_viper.WatchRemoteConfig() - if err != nil { - log.Errorf("unable to read remote config: %v", err) - continue - } - - // unmarshal new config into our runtime config struct. you can also use channel - // to implement a signal to notify the system of the changes - runtime_viper.Unmarshal(&runtime_conf) - } -}() -``` - -## Getting Values From Viper - -In Viper, there are a few ways to get a value depending on the value’s type. -The following functions and methods exist: - - * `Get(key string) : any` - * `GetBool(key string) : bool` - * `GetFloat64(key string) : float64` - * `GetInt(key string) : int` - * `GetIntSlice(key string) : []int` - * `GetString(key string) : string` - * `GetStringMap(key string) : map[string]any` - * `GetStringMapString(key string) : map[string]string` - * `GetStringSlice(key string) : []string` - * `GetTime(key string) : time.Time` - * `GetDuration(key string) : time.Duration` - * `IsSet(key string) : bool` - * `AllSettings() : map[string]any` - -One important thing to recognize is that each Get function will return a zero -value if it’s not found. To check if a given key exists, the `IsSet()` method -has been provided. - -The zero value will also be returned if the value is set, but fails to parse -as the requested type. 
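As a hedged sketch of the `IsSet` guard just described (the key name `logfile` is purely illustrative; only `IsSet` and `GetString` from the getter list above are assumed):

```go
// GetString returns "" both when "logfile" is unset and when it is explicitly
// set to an empty string; IsSet is how you tell those cases apart.
if viper.IsSet("logfile") {
	fmt.Println("logging to", viper.GetString("logfile"))
} else {
	fmt.Println("logfile not configured; using default output")
}
```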
-
-Example:
-```go
-viper.GetString("logfile") // case-insensitive Setting & Getting
-if viper.GetBool("verbose") {
-	fmt.Println("verbose enabled")
-}
-```
-### Accessing nested keys
-
-The accessor methods also accept formatted paths to deeply nested keys. For
-example, if the following JSON file is loaded:
-
-```json
-{
-    "host": {
-        "address": "localhost",
-        "port": 5799
-    },
-    "datastore": {
-        "metric": {
-            "host": "127.0.0.1",
-            "port": 3099
-        },
-        "warehouse": {
-            "host": "198.0.0.1",
-            "port": 2112
-        }
-    }
-}
-
-```
-
-Viper can access a nested field by passing a `.` delimited path of keys:
-
-```go
-GetString("datastore.metric.host") // (returns "127.0.0.1")
-```
-
-This obeys the precedence rules established above; the search for the path
-will cascade through the remaining configuration registries until found.
-
-For example, given this configuration file, both `datastore.metric.host` and
-`datastore.metric.port` are already defined (and may be overridden). If in addition
-`datastore.metric.protocol` was defined in the defaults, Viper would also find it.
-
-However, if `datastore.metric` was overridden (by a flag, an environment variable,
-the `Set()` method, …) with an immediate value, then all sub-keys of
-`datastore.metric` become undefined; they are “shadowed” by the higher-priority
-configuration level.
-
-Viper can access array indices by using numbers in the path. For example:
-
-```jsonc
-{
-    "host": {
-        "address": "localhost",
-        "ports": [
-            5799,
-            6029
-        ]
-    },
-    "datastore": {
-        "metric": {
-            "host": "127.0.0.1",
-            "port": 3099
-        },
-        "warehouse": {
-            "host": "198.0.0.1",
-            "port": 2112
-        }
-    }
-}
-
-GetInt("host.ports.1") // returns 6029
-
-```
-
-Lastly, if there exists a key that matches the delimited key path, its value
-will be returned instead. E.g.
-
-```jsonc
-{
-    "datastore.metric.host": "0.0.0.0",
-    "host": {
-        "address": "localhost",
-        "port": 5799
-    },
-    "datastore": {
-        "metric": {
-            "host": "127.0.0.1",
-            "port": 3099
-        },
-        "warehouse": {
-            "host": "198.0.0.1",
-            "port": 2112
-        }
-    }
-}
-
-GetString("datastore.metric.host") // returns "0.0.0.0"
-```
-
-### Extracting a sub-tree
-
-When developing reusable modules, it's often useful to extract a subset of the configuration
-and pass it to a module. This way the module can be instantiated more than once, with different configurations.
-
-For example, an application might use multiple different cache stores for different purposes:
-
-```yaml
-cache:
-  cache1:
-    max-items: 100
-    item-size: 64
-  cache2:
-    max-items: 200
-    item-size: 80
-```
-
-We could pass the cache name to a module (eg. `NewCache("cache1")`),
-but it would require awkward concatenation for accessing config keys and would be less
-separated from the global config.
-
-So instead of doing that, let's pass a Viper instance to the constructor that represents a subset of the configuration:
-
-```go
-cache1Config := viper.Sub("cache.cache1")
-if cache1Config == nil { // Sub returns nil if the key cannot be found
-	panic("cache configuration not found")
-}
-
-cache1 := NewCache(cache1Config)
-```
-
-**Note:** Always check the return value of `Sub`. It returns `nil` if a key cannot be found.
-
-Internally, the `NewCache` function can address `max-items` and `item-size` keys directly:
-
-```go
-func NewCache(v *Viper) *Cache {
-	return &Cache{
-		MaxItems: v.GetInt("max-items"),
-		ItemSize: v.GetInt("item-size"),
-	}
-}
-```
-
-The resulting code is easy to test, since it's decoupled from the main config structure,
-and easier to reuse (for the same reason).
-
-
-### Unmarshaling
-
-You also have the option of unmarshaling all or a specific value to a struct, map,
-etc.
-
-There are two methods to do this:
-
- * `Unmarshal(rawVal any) : error`
- * `UnmarshalKey(key string, rawVal any) : error`
-
-Example:
-
-```go
-type config struct {
-	Port int
-	Name string
-	PathMap string `mapstructure:"path_map"`
-}
-
-var C config
-
-err := viper.Unmarshal(&C)
-if err != nil {
-	t.Fatalf("unable to decode into struct, %v", err)
-}
-```
-
-If you want to unmarshal configuration where the keys themselves contain a dot (the default key delimiter),
-you have to change the delimiter:
-
-```go
-v := viper.NewWithOptions(viper.KeyDelimiter("::"))
-
-v.SetDefault("chart::values", map[string]any{
-	"ingress": map[string]any{
-		"annotations": map[string]any{
-			"traefik.frontend.rule.type":                 "PathPrefix",
-			"traefik.ingress.kubernetes.io/ssl-redirect": "true",
-		},
-	},
-})
-
-type config struct {
-	Chart struct{
-		Values map[string]any
-	}
-}
-
-var C config
-
-v.Unmarshal(&C)
-```
-
-Viper also supports unmarshaling into embedded structs:
-
-```go
-/*
-Example config:
-
-module:
-    enabled: true
-    token: 89h3f98hbwf987h3f98wenf89ehf
-*/
-type config struct {
-	Module struct {
-		Enabled bool
-
-		moduleConfig `mapstructure:",squash"`
-	}
-}
-
-// moduleConfig could be in a module specific package
-type moduleConfig struct {
-	Token string
-}
-
-var C config
-
-err := viper.Unmarshal(&C)
-if err != nil {
-	t.Fatalf("unable to decode into struct, %v", err)
-}
-```
-
-Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values, which uses `mapstructure` tags by default.
-
-### Decoding custom formats
-
-A frequently requested feature for Viper is adding more value formats and decoders.
-For example, parsing character (dot, comma, semicolon, etc.) separated strings into slices.
-
-This is already available in Viper using mapstructure decode hooks.
-
-Read more about the details in [this blog post](https://sagikazarmark.hu/blog/decoding-custom-formats-with-viper/).
-
-### Marshalling to string
-
-You may need to marshal all the settings held in viper into a string rather than write them to a file.
-You can use your favorite format's marshaller with the config returned by `AllSettings()`.
-
-```go
-import (
-	yaml "gopkg.in/yaml.v2"
-	// ...
-)
-
-func yamlStringSettings() string {
-	c := viper.AllSettings()
-	bs, err := yaml.Marshal(c)
-	if err != nil {
-		log.Fatalf("unable to marshal config to YAML: %v", err)
-	}
-	return string(bs)
-}
-```
-
-## Viper or Vipers?
-
-Viper comes ready to use out of the box. There is no configuration or
-initialization needed to begin using Viper. Since most applications will want
-to use a single central repository for their configuration, the viper package
-provides this. It is similar to a singleton.
-
-All of the examples above demonstrate using viper in its singleton
-style approach.
-
-### Working with multiple vipers
-
-You can also create many different vipers for use in your application. Each will
-have its own unique set of configurations and values.
-Each can read from a different config file, key value store, etc. All of the
-functions that the viper package supports are mirrored as methods on a viper.
-
-Example:
-
-```go
-x := viper.New()
-y := viper.New()
-
-x.SetDefault("ContentDir", "content")
-y.SetDefault("ContentDir", "foobar")
-
-//...
-```
-
-When working with multiple vipers, it is up to the user to keep track of the
-different vipers.
-
-
-## Q & A
-
-### Why is it called “Viper”?
-
-A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe))
-to [Cobra](https://github.com/spf13/cobra). While both can operate completely
-independently, together they make a powerful pair to handle much of your
-application foundation needs.
-
-### Why is it called “Cobra”?
-
-Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)?
-
-### Does Viper support case sensitive keys?
-
-**tl;dr:** No.
-
-Viper merges configuration from various sources, many of which are either case insensitive or use different casing than the rest of the sources (eg. env vars).
-In order to provide the best experience when using multiple sources, the decision has been made to make all keys case insensitive.
-
-There have been several attempts to implement case sensitivity, but unfortunately it's not that trivial. We might take a stab at implementing it in [Viper v2](https://github.com/spf13/viper/issues/772), but despite the initial noise, it does not seem to be requested that much.
-
-You can vote for case sensitivity by filling out this feedback form: https://forms.gle/R6faU74qPRPAzchZ9
-
-### Is it safe to concurrently read and write to a viper?
-
-No, you will need to synchronize access to the viper yourself (for example by using the `sync` package). Concurrent reads and writes can cause a panic.
-
-## Troubleshooting
-
-See [TROUBLESHOOTING.md](TROUBLESHOOTING.md).
-
-## Development
-
-**For an optimal developer experience, it is recommended to install [Nix](https://nixos.org/download.html) and [direnv](https://direnv.net/docs/installation.html).**
-
-_Alternatively, install [Go](https://go.dev/dl/) on your computer then run `make deps` to install the rest of the dependencies._
-
-Run the test suite:
-
-```shell
-make test
-```
-
-Run linters:
-
-```shell
-make lint # pass -j option to run them in parallel
-```
-
-Some linter violations can automatically be fixed:
-
-```shell
-make fmt
-```
-
-## License
-
-The project is licensed under the [MIT License](LICENSE).
diff --git a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md
deleted file mode 100644
index c4e36c6860..0000000000
--- a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Troubleshooting
-
-## Unmarshaling doesn't work
-
-The most common reason for this issue is improper use of struct tags (eg. `yaml` or `json`). Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values, which uses `mapstructure` tags by default. Please refer to the library's documentation for using other struct tags.
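-For illustration, a minimal sketch of the difference (the `Config` struct and its keys are hypothetical). By default, `Unmarshal` honors `mapstructure` tags and ignores `json` or `yaml` tags:
-
-```go
-type Config struct {
-	APIKey string `json:"api_key"`            // json tag is ignored by viper.Unmarshal
-	Token  string `mapstructure:"auth_token"` // matches the "auth_token" key
-}
-
-var c Config
-if err := viper.Unmarshal(&c); err != nil {
-	log.Fatalf("unable to decode into struct: %v", err)
-}
-// c.Token is filled from "auth_token"; c.APIKey is only matched
-// by its field name ("apikey"), not by "api_key".
-```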
-
-## Cannot find package
-
-Viper installation seems to fail a lot lately with the following (or a similar) error:
-
-```
-cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of:
-/usr/local/Cellar/go/1.15.7_1/libexec/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOROOT)
-/Users/user/go/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOPATH)
-```
-
-As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`.
-Viper opted to use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch).
-
-The solution is easy: switch to using Go Modules.
-Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that.
-
-**tl;dr:** `export GO111MODULE=on`
-
-## Unquoted 'y' and 'n' characters get replaced with _true_ and _false_ when reading a YAML file
-
-This is a YAML 1.1 feature according to [go-yaml/yaml#740](https://github.com/go-yaml/yaml/issues/740).
-
-Potential solutions are:
-
-1. Quoting values resolved as boolean
-1. Upgrading to YAML v3 (for the time being this is possible by passing the `viper_yaml3` tag to your build)
diff --git a/vendor/github.com/spf13/viper/file.go b/vendor/github.com/spf13/viper/file.go
deleted file mode 100644
index a54fe5a7a8..0000000000
--- a/vendor/github.com/spf13/viper/file.go
+++ /dev/null
@@ -1,56 +0,0 @@
-//go:build !finder
-
-package viper
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-
-	"github.com/spf13/afero"
-)
-
-// Search all configPaths for any config file.
-// Returns the first path that exists (and is a config file).
-func (v *Viper) findConfigFile() (string, error) {
-	v.logger.Info("searching for config in paths", "paths", v.configPaths)
-
-	for _, cp := range v.configPaths {
-		file := v.searchInPath(cp)
-		if file != "" {
-			return file, nil
-		}
-	}
-	return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
-}
-
-func (v *Viper) searchInPath(in string) (filename string) {
-	v.logger.Debug("searching for config in path", "path", in)
-	for _, ext := range SupportedExts {
-		v.logger.Debug("checking if file exists", "file", filepath.Join(in, v.configName+"."+ext))
-		if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b {
-			v.logger.Debug("found file", "file", filepath.Join(in, v.configName+"."+ext))
-			return filepath.Join(in, v.configName+"."+ext)
-		}
-	}
-
-	if v.configType != "" {
-		if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b {
-			return filepath.Join(in, v.configName)
-		}
-	}
-
-	return ""
-}
-
-// exists checks if file exists.
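-// It reports true only for regular (non-directory) paths; a missing path
-// yields (false, nil), and any other Stat error is returned as-is.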
-func exists(fs afero.Fs, path string) (bool, error) {
-	stat, err := fs.Stat(path)
-	if err == nil {
-		return !stat.IsDir(), nil
-	}
-	if os.IsNotExist(err) {
-		return false, nil
-	}
-	return false, err
-}
diff --git a/vendor/github.com/spf13/viper/file_finder.go b/vendor/github.com/spf13/viper/file_finder.go
deleted file mode 100644
index d96a1bd223..0000000000
--- a/vendor/github.com/spf13/viper/file_finder.go
+++ /dev/null
@@ -1,38 +0,0 @@
-//go:build finder
-
-package viper
-
-import (
-	"fmt"
-
-	"github.com/sagikazarmark/locafero"
-)
-
-// Search all configPaths for any config file.
-// Returns the first path that exists (and is a config file).
-func (v *Viper) findConfigFile() (string, error) {
-	var names []string
-
-	if v.configType != "" {
-		names = locafero.NameWithOptionalExtensions(v.configName, SupportedExts...)
-	} else {
-		names = locafero.NameWithExtensions(v.configName, SupportedExts...)
-	}
-
-	finder := locafero.Finder{
-		Paths: v.configPaths,
-		Names: names,
-		Type:  locafero.FileTypeFile,
-	}
-
-	results, err := finder.Find(v.fs)
-	if err != nil {
-		return "", err
-	}
-
-	if len(results) == 0 {
-		return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
-	}
-
-	return results[0], nil
-}
diff --git a/vendor/github.com/spf13/viper/flags.go b/vendor/github.com/spf13/viper/flags.go
deleted file mode 100644
index de033ed58f..0000000000
--- a/vendor/github.com/spf13/viper/flags.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package viper
-
-import "github.com/spf13/pflag"
-
-// FlagValueSet is an interface that users can implement
-// to bind a set of flags to viper.
-type FlagValueSet interface {
-	VisitAll(fn func(FlagValue))
-}
-
-// FlagValue is an interface that users can implement
-// to bind different flags to viper.
-type FlagValue interface {
-	HasChanged() bool
-	Name() string
-	ValueString() string
-	ValueType() string
-}
-
-// pflagValueSet is a wrapper around *pflag.FlagSet
-// that implements FlagValueSet.
-type pflagValueSet struct {
-	flags *pflag.FlagSet
-}
-
-// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet.
-func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) {
-	p.flags.VisitAll(func(flag *pflag.Flag) {
-		fn(pflagValue{flag})
-	})
-}
-
-// pflagValue is a wrapper around *pflag.Flag
-// that implements FlagValue.
-type pflagValue struct {
-	flag *pflag.Flag
-}
-
-// HasChanged returns whether the flag has changed or not.
-func (p pflagValue) HasChanged() bool {
-	return p.flag.Changed
-}
-
-// Name returns the name of the flag.
-func (p pflagValue) Name() string {
-	return p.flag.Name
-}
-
-// ValueString returns the value of the flag as a string.
-func (p pflagValue) ValueString() string {
-	return p.flag.Value.String()
-}
-
-// ValueType returns the type of the flag as a string.
-func (p pflagValue) ValueType() string { - return p.flag.Value.Type() -} diff --git a/vendor/github.com/spf13/viper/flake.lock b/vendor/github.com/spf13/viper/flake.lock deleted file mode 100644 index 78da51090b..0000000000 --- a/vendor/github.com/spf13/viper/flake.lock +++ /dev/null @@ -1,255 +0,0 @@ -{ - "nodes": { - "devenv": { - "inputs": { - "flake-compat": "flake-compat", - "nix": "nix", - "nixpkgs": "nixpkgs", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1687972261, - "narHash": "sha256-+mxvZfwMVoaZYETmuQWqTi/7T9UKoAE+WpdSQkOVJ2g=", - "owner": "cachix", - "repo": "devenv", - "rev": "e85df562088573305e55906eaa964341f8cb0d9f", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib" - }, - "locked": { - "lastModified": 1687762428, - "narHash": "sha256-DIf7mi45PKo+s8dOYF+UlXHzE0Wl/+k3tXUyAoAnoGE=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "37dd7bb15791c86d55c5121740a1887ab55ee836", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "flake-utils": { - "locked": { - "lastModified": 1667395993, - "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "devenv", - "pre-commit-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1660459072, - "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", - "owner": "hercules-ci", - "repo": "gitignore.nix", - "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", - "type": "github" - } - }, - "lowdown-src": { - "flake": false, - "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", - "type": "github" - }, - "original": { - "owner": "kristapsdz", - "repo": "lowdown", - "type": "github" - } - }, - "nix": { - "inputs": { - "lowdown-src": "lowdown-src", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1676545802, - "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", - "owner": "domenkozar", - "repo": "nix", - "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "relaxed-flakes", - "repo": "nix", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1678875422, - "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", 
- "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "dir": "lib", - "lastModified": 1685564631, - "narHash": "sha256-8ywr3AkblY4++3lIVxmrWZFzac7+f32ZEhH/A8pNscI=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "4f53efe34b3a8877ac923b9350c874e3dcd5dc0a", - "type": "github" - }, - "original": { - "dir": "lib", - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1678872516, - "narHash": "sha256-/E1YwtMtFAu2KUQKV/1+KFuReYPANM2Rzehk84VxVoc=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "9b8e5abb18324c7fe9f07cb100c3cd4a29cda8b8", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-22.11", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1687886075, - "narHash": "sha256-PeayJDDDy+uw1Ats4moZnRdL1OFuZm1Tj+KiHlD67+o=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "a565059a348422af5af9026b5174dc5c0dcefdae", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, - "locked": { - "lastModified": 1686050334, - "narHash": "sha256-R0mczWjDzBpIvM3XXhO908X5e2CQqjyh/gFbwZk/7/Q=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "6881eb2ae5d8a3516e34714e7a90d9d95914c4dc", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "type": "github" - } - }, - "root": { - "inputs": { - "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_2" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/vendor/github.com/spf13/viper/flake.nix b/vendor/github.com/spf13/viper/flake.nix deleted file mode 100644 index 9b26c3fcf5..0000000000 --- a/vendor/github.com/spf13/viper/flake.nix +++ /dev/null @@ -1,56 +0,0 @@ -{ - description = "Viper"; - - inputs = { - nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - flake-parts.url = "github:hercules-ci/flake-parts"; - devenv.url = "github:cachix/devenv"; - }; - - outputs = inputs@{ flake-parts, ... }: - flake-parts.lib.mkFlake { inherit inputs; } { - imports = [ - inputs.devenv.flakeModule - ]; - - systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; - - perSystem = { config, self', inputs', pkgs, system, ... 
}: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - }; - - pre-commit.hooks = { - nixpkgs-fmt.enable = true; - yamllint.enable = true; - }; - - packages = with pkgs; [ - gnumake - - golangci-lint - yamllint - ]; - - scripts = { - versions.exec = '' - go version - golangci-lint version - ''; - }; - - enterShell = '' - versions - ''; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; - }; - }; - }; -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/vendor/github.com/spf13/viper/internal/encoding/decoder.go deleted file mode 100644 index 8a7b1dbc91..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/decoder.go +++ /dev/null @@ -1,61 +0,0 @@ -package encoding - -import ( - "sync" -) - -// Decoder decodes the contents of b into v. -// It's primarily used for decoding contents of a file into a map[string]any. -type Decoder interface { - Decode(b []byte, v map[string]any) error -} - -const ( - // ErrDecoderNotFound is returned when there is no decoder registered for a format. - ErrDecoderNotFound = encodingError("decoder not found for this format") - - // ErrDecoderFormatAlreadyRegistered is returned when an decoder is already registered for a format. - ErrDecoderFormatAlreadyRegistered = encodingError("decoder already registered for this format") -) - -// DecoderRegistry can choose an appropriate Decoder based on the provided format. -type DecoderRegistry struct { - decoders map[string]Decoder - - mu sync.RWMutex -} - -// NewDecoderRegistry returns a new, initialized DecoderRegistry. -func NewDecoderRegistry() *DecoderRegistry { - return &DecoderRegistry{ - decoders: make(map[string]Decoder), - } -} - -// RegisterDecoder registers a Decoder for a format. -// Registering a Decoder for an already existing format is not supported. -func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error { - e.mu.Lock() - defer e.mu.Unlock() - - if _, ok := e.decoders[format]; ok { - return ErrDecoderFormatAlreadyRegistered - } - - e.decoders[format] = enc - - return nil -} - -// Decode calls the underlying Decoder based on the format. -func (e *DecoderRegistry) Decode(format string, b []byte, v map[string]any) error { - e.mu.RLock() - decoder, ok := e.decoders[format] - e.mu.RUnlock() - - if !ok { - return ErrDecoderNotFound - } - - return decoder.Decode(b, v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go b/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go deleted file mode 100644 index 3ebc76f029..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go +++ /dev/null @@ -1,61 +0,0 @@ -package dotenv - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/subosito/gotenv" -) - -const keyDelimiter = "_" - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for encoding data containing environment variables -// (commonly called as dotenv format). 
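-// Encode flattens nested maps with "_" as the delimiter and upper-cases the
-// resulting keys; Decode parses KEY=value pairs via gotenv.StrictParse.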
-type Codec struct{} - -func (Codec) Encode(v map[string]any) ([]byte, error) { - flattened := map[string]any{} - - flattened = flattenAndMergeMap(flattened, v, "", keyDelimiter) - - keys := make([]string, 0, len(flattened)) - - for key := range flattened { - keys = append(keys, key) - } - - sort.Strings(keys) - - var buf bytes.Buffer - - for _, key := range keys { - _, err := buf.WriteString(fmt.Sprintf("%v=%v\n", strings.ToUpper(key), flattened[key])) - if err != nil { - return nil, err - } - } - - return buf.Bytes(), nil -} - -func (Codec) Decode(b []byte, v map[string]any) error { - var buf bytes.Buffer - - _, err := buf.Write(b) - if err != nil { - return err - } - - env, err := gotenv.StrictParse(&buf) - if err != nil { - return err - } - - for key, value := range env { - v[key] = value - } - - return nil -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go deleted file mode 100644 index 8bfe0a9de2..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go +++ /dev/null @@ -1,41 +0,0 @@ -package dotenv - -import ( - "strings" - - "github.com/spf13/cast" -) - -// flattenAndMergeMap recursively flattens the given map into a new map -// Code is based on the function with the same name in the main package. -// TODO: move it to a common place. -func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any { - if shadow != nil && prefix != "" && shadow[prefix] != nil { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]any) - } - - var m2 map[string]any - if prefix != "" { - prefix += delimiter - } - for k, val := range m { - fullKey := prefix + k - switch val := val.(type) { - case map[string]any: - m2 = val - case map[any]any: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = val - continue - } - // recursively merge to shadow map - shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) - } - return shadow -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/vendor/github.com/spf13/viper/internal/encoding/encoder.go deleted file mode 100644 index 659585962c..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/encoder.go +++ /dev/null @@ -1,60 +0,0 @@ -package encoding - -import ( - "sync" -) - -// Encoder encodes the contents of v into a byte representation. -// It's primarily used for encoding a map[string]any into a file format. -type Encoder interface { - Encode(v map[string]any) ([]byte, error) -} - -const ( - // ErrEncoderNotFound is returned when there is no encoder registered for a format. - ErrEncoderNotFound = encodingError("encoder not found for this format") - - // ErrEncoderFormatAlreadyRegistered is returned when an encoder is already registered for a format. - ErrEncoderFormatAlreadyRegistered = encodingError("encoder already registered for this format") -) - -// EncoderRegistry can choose an appropriate Encoder based on the provided format. -type EncoderRegistry struct { - encoders map[string]Encoder - - mu sync.RWMutex -} - -// NewEncoderRegistry returns a new, initialized EncoderRegistry. -func NewEncoderRegistry() *EncoderRegistry { - return &EncoderRegistry{ - encoders: make(map[string]Encoder), - } -} - -// RegisterEncoder registers an Encoder for a format. 
-// Registering an Encoder for an already existing format is not supported.
-func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error {
-	e.mu.Lock()
-	defer e.mu.Unlock()
-
-	if _, ok := e.encoders[format]; ok {
-		return ErrEncoderFormatAlreadyRegistered
-	}
-
-	e.encoders[format] = enc
-
-	return nil
-}
-
-func (e *EncoderRegistry) Encode(format string, v map[string]any) ([]byte, error) {
-	e.mu.RLock()
-	encoder, ok := e.encoders[format]
-	e.mu.RUnlock()
-
-	if !ok {
-		return nil, ErrEncoderNotFound
-	}
-
-	return encoder.Encode(v)
-}
diff --git a/vendor/github.com/spf13/viper/internal/encoding/error.go b/vendor/github.com/spf13/viper/internal/encoding/error.go
deleted file mode 100644
index e4cde02d7b..0000000000
--- a/vendor/github.com/spf13/viper/internal/encoding/error.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package encoding
-
-type encodingError string
-
-func (e encodingError) Error() string {
-	return string(e)
-}
diff --git a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go
deleted file mode 100644
index d7fa8a1b7a..0000000000
--- a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package hcl
-
-import (
-	"bytes"
-	"encoding/json"
-
-	"github.com/hashicorp/hcl"
-	"github.com/hashicorp/hcl/hcl/printer"
-)
-
-// Codec implements the encoding.Encoder and encoding.Decoder interfaces for HCL encoding.
-// TODO: add printer config to the codec?
-type Codec struct{}
-
-func (Codec) Encode(v map[string]any) ([]byte, error) {
-	b, err := json.Marshal(v)
-	if err != nil {
-		return nil, err
-	}
-
-	// TODO: use printer.Format? Is the trailing newline an issue?
-
-	ast, err := hcl.Parse(string(b))
-	if err != nil {
-		return nil, err
-	}
-
-	var buf bytes.Buffer
-
-	err = printer.Fprint(&buf, ast.Node)
-	if err != nil {
-		return nil, err
-	}
-
-	return buf.Bytes(), nil
-}
-
-func (Codec) Decode(b []byte, v map[string]any) error {
-	return hcl.Unmarshal(b, &v)
-}
diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go b/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go
deleted file mode 100644
index d91cf59d2b..0000000000
--- a/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package ini
-
-import (
-	"bytes"
-	"sort"
-	"strings"
-
-	"github.com/spf13/cast"
-	"gopkg.in/ini.v1"
-)
-
-// LoadOptions contains all customized options used for loading data source(s).
-// This type is added here for convenience: this way consumers can import a single package called "ini".
-type LoadOptions = ini.LoadOptions
-
-// Codec implements the encoding.Encoder and encoding.Decoder interfaces for INI encoding.
-type Codec struct {
-	KeyDelimiter string
-	LoadOptions  LoadOptions
-}
-
-func (c Codec) Encode(v map[string]any) ([]byte, error) {
-	cfg := ini.Empty()
-	ini.PrettyFormat = false
-
-	flattened := map[string]any{}
-
-	flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter())
-
-	keys := make([]string, 0, len(flattened))
-
-	for key := range flattened {
-		keys = append(keys, key)
-	}
-
-	sort.Strings(keys)
-
-	for _, key := range keys {
-		sectionName, keyName := "", key
-
-		lastSep := strings.LastIndex(key, ".")
-		if lastSep != -1 {
-			sectionName = key[:(lastSep)]
-			keyName = key[(lastSep + 1):]
-		}
-
-		// TODO: is this a good idea?
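-		// Keys flattened into the "default" section are written to the
-		// unnamed root section of the INI file.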
- if sectionName == "default" { - sectionName = "" - } - - cfg.Section(sectionName).Key(keyName).SetValue(cast.ToString(flattened[key])) - } - - var buf bytes.Buffer - - _, err := cfg.WriteTo(&buf) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (c Codec) Decode(b []byte, v map[string]any) error { - cfg := ini.Empty(c.LoadOptions) - - err := cfg.Append(b) - if err != nil { - return err - } - - sections := cfg.Sections() - - for i := 0; i < len(sections); i++ { - section := sections[i] - keys := section.Keys() - - for j := 0; j < len(keys); j++ { - key := keys[j] - value := cfg.Section(section.Name()).Key(key.Name()).String() - - deepestMap := deepSearch(v, strings.Split(section.Name(), c.keyDelimiter())) - - // set innermost value - deepestMap[key.Name()] = value - } - } - - return nil -} - -func (c Codec) keyDelimiter() string { - if c.KeyDelimiter == "" { - return "." - } - - return c.KeyDelimiter -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go deleted file mode 100644 index 490ab594ec..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go +++ /dev/null @@ -1,74 +0,0 @@ -package ini - -import ( - "strings" - - "github.com/spf13/cast" -) - -// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED -// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! -func deepSearch(m map[string]any, path []string) map[string]any { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]any) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]any) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]any) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} - -// flattenAndMergeMap recursively flattens the given map into a new map -// Code is based on the function with the same name in the main package. -// TODO: move it to a common place. 
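-// For example, {"a": {"b": 1}} with delimiter "." flattens to {"a.b": 1};
-// keys are lower-cased along the way.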
-func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any { - if shadow != nil && prefix != "" && shadow[prefix] != nil { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]any) - } - - var m2 map[string]any - if prefix != "" { - prefix += delimiter - } - for k, val := range m { - fullKey := prefix + k - switch val := val.(type) { - case map[string]any: - m2 = val - case map[any]any: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = val - continue - } - // recursively merge to shadow map - shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) - } - return shadow -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go deleted file mode 100644 index e92e5172c1..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go +++ /dev/null @@ -1,86 +0,0 @@ -package javaproperties - -import ( - "bytes" - "sort" - "strings" - - "github.com/magiconair/properties" - "github.com/spf13/cast" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for Java properties encoding. -type Codec struct { - KeyDelimiter string - - // Store read properties on the object so that we can write back in order with comments. - // This will only be used if the configuration read is a properties file. - // TODO: drop this feature in v2 - // TODO: make use of the global properties object optional - Properties *properties.Properties -} - -func (c *Codec) Encode(v map[string]any) ([]byte, error) { - if c.Properties == nil { - c.Properties = properties.NewProperties() - } - - flattened := map[string]any{} - - flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) - - keys := make([]string, 0, len(flattened)) - - for key := range flattened { - keys = append(keys, key) - } - - sort.Strings(keys) - - for _, key := range keys { - _, _, err := c.Properties.Set(key, cast.ToString(flattened[key])) - if err != nil { - return nil, err - } - } - - var buf bytes.Buffer - - _, err := c.Properties.WriteComment(&buf, "#", properties.UTF8) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (c *Codec) Decode(b []byte, v map[string]any) error { - var err error - c.Properties, err = properties.Load(b, properties.UTF8) - if err != nil { - return err - } - - for _, key := range c.Properties.Keys() { - // ignore existence check: we know it's there - value, _ := c.Properties.Get(key) - - // recursively build nested maps - path := strings.Split(key, c.keyDelimiter()) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value - } - - return nil -} - -func (c Codec) keyDelimiter() string { - if c.KeyDelimiter == "" { - return "." 
- } - - return c.KeyDelimiter -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go deleted file mode 100644 index 6e1aff2236..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go +++ /dev/null @@ -1,74 +0,0 @@ -package javaproperties - -import ( - "strings" - - "github.com/spf13/cast" -) - -// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED -// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! -func deepSearch(m map[string]any, path []string) map[string]any { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]any) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]any) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]any) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} - -// flattenAndMergeMap recursively flattens the given map into a new map -// Code is based on the function with the same name in the main package. -// TODO: move it to a common place. -func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any { - if shadow != nil && prefix != "" && shadow[prefix] != nil { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]any) - } - - var m2 map[string]any - if prefix != "" { - prefix += delimiter - } - for k, val := range m { - fullKey := prefix + k - switch val := val.(type) { - case map[string]any: - m2 = val - case map[any]any: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = val - continue - } - // recursively merge to shadow map - shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) - } - return shadow -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/json/codec.go b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go deleted file mode 100644 index da7546b5a1..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/json/codec.go +++ /dev/null @@ -1,17 +0,0 @@ -package json - -import ( - "encoding/json" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding. -type Codec struct{} - -func (Codec) Encode(v map[string]any) ([]byte, error) { - // TODO: expose prefix and indent in the Codec as setting? - return json.MarshalIndent(v, "", " ") -} - -func (Codec) Decode(b []byte, v map[string]any) error { - return json.Unmarshal(b, &v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go deleted file mode 100644 index c70aa8d280..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go +++ /dev/null @@ -1,16 +0,0 @@ -package toml - -import ( - "github.com/pelletier/go-toml/v2" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. 
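-// Both methods delegate directly to go-toml/v2's Marshal and Unmarshal.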
-type Codec struct{} - -func (Codec) Encode(v map[string]any) ([]byte, error) { - return toml.Marshal(v) -} - -func (Codec) Decode(b []byte, v map[string]any) error { - return toml.Unmarshal(b, &v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go deleted file mode 100644 index 0368792499..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go +++ /dev/null @@ -1,14 +0,0 @@ -package yaml - -import "gopkg.in/yaml.v3" - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. -type Codec struct{} - -func (Codec) Encode(v map[string]any) ([]byte, error) { - return yaml.Marshal(v) -} - -func (Codec) Decode(b []byte, v map[string]any) error { - return yaml.Unmarshal(b, &v) -} diff --git a/vendor/github.com/spf13/viper/internal/features/bind_struct.go b/vendor/github.com/spf13/viper/internal/features/bind_struct.go deleted file mode 100644 index 89302c2164..0000000000 --- a/vendor/github.com/spf13/viper/internal/features/bind_struct.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build viper_bind_struct - -package features - -const BindStruct = true diff --git a/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go b/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go deleted file mode 100644 index edfaf73b64..0000000000 --- a/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build !viper_bind_struct - -package features - -const BindStruct = false diff --git a/vendor/github.com/spf13/viper/logger.go b/vendor/github.com/spf13/viper/logger.go deleted file mode 100644 index 8938053b31..0000000000 --- a/vendor/github.com/spf13/viper/logger.go +++ /dev/null @@ -1,68 +0,0 @@ -package viper - -import ( - "context" - - slog "github.com/sagikazarmark/slog-shim" -) - -// Logger is a unified interface for various logging use cases and practices, including: -// - leveled logging -// - structured logging -// -// Deprecated: use `log/slog` instead. -type Logger interface { - // Trace logs a Trace event. - // - // Even more fine-grained information than Debug events. - // Loggers not supporting this level should fall back to Debug. - Trace(msg string, keyvals ...any) - - // Debug logs a Debug event. - // - // A verbose series of information events. - // They are useful when debugging the system. - Debug(msg string, keyvals ...any) - - // Info logs an Info event. - // - // General information about what's happening inside the system. - Info(msg string, keyvals ...any) - - // Warn logs a Warn(ing) event. - // - // Non-critical events that should be looked at. - Warn(msg string, keyvals ...any) - - // Error logs an Error event. - // - // Critical events that require immediate attention. - // Loggers commonly provide Fatal and Panic levels above Error level, - // but exiting and panicking is out of scope for a logging library. - Error(msg string, keyvals ...any) -} - -// WithLogger sets a custom logger. 
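-// The provided *slog.Logger replaces the no-op logger that New() installs by default.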
-func WithLogger(l *slog.Logger) Option {
-	return optionFunc(func(v *Viper) {
-		v.logger = l
-	})
-}
-
-type discardHandler struct{}
-
-func (n *discardHandler) Enabled(_ context.Context, _ slog.Level) bool {
-	return false
-}
-
-func (n *discardHandler) Handle(_ context.Context, _ slog.Record) error {
-	return nil
-}
-
-func (n *discardHandler) WithAttrs(_ []slog.Attr) slog.Handler {
-	return n
-}
-
-func (n *discardHandler) WithGroup(_ string) slog.Handler {
-	return n
-}
diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go
deleted file mode 100644
index 117c6ac312..0000000000
--- a/vendor/github.com/spf13/viper/util.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright © 2014 Steve Francia <spf@spf13.com>.
-//
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-// Viper is an application configuration system.
-// It believes that applications can be configured a variety of ways
-// via flags, ENVIRONMENT variables, configuration files retrieved
-// from the file system, or a remote key/value store.
-
-package viper
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strings"
-	"unicode"
-
-	slog "github.com/sagikazarmark/slog-shim"
-	"github.com/spf13/cast"
-)
-
-// ConfigParseError denotes failing to parse the configuration file.
-type ConfigParseError struct {
-	err error
-}
-
-// Error returns the formatted configuration error.
-func (pe ConfigParseError) Error() string {
-	return fmt.Sprintf("While parsing config: %s", pe.err.Error())
-}
-
-// Unwrap returns the wrapped error.
-func (pe ConfigParseError) Unwrap() error {
-	return pe.err
-}
-
-// toCaseInsensitiveValue checks if the value is a map;
-// if so, create a copy and lower-case the keys recursively.
-func toCaseInsensitiveValue(value any) any {
-	switch v := value.(type) {
-	case map[any]any:
-		value = copyAndInsensitiviseMap(cast.ToStringMap(v))
-	case map[string]any:
-		value = copyAndInsensitiviseMap(v)
-	}
-
-	return value
-}
-
-// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of
-// any map it makes case insensitive.
-func copyAndInsensitiviseMap(m map[string]any) map[string]any { - nm := make(map[string]any) - - for key, val := range m { - lkey := strings.ToLower(key) - switch v := val.(type) { - case map[any]any: - nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]any: - nm[lkey] = copyAndInsensitiviseMap(v) - default: - nm[lkey] = v - } - } - - return nm -} - -func insensitiviseVal(val any) any { - switch v := val.(type) { - case map[any]any: - // nested map: cast and recursively insensitivise - val = cast.ToStringMap(val) - insensitiviseMap(val.(map[string]any)) - case map[string]any: - // nested map: recursively insensitivise - insensitiviseMap(v) - case []any: - // nested array: recursively insensitivise - insensitiveArray(v) - } - return val -} - -func insensitiviseMap(m map[string]any) { - for key, val := range m { - val = insensitiviseVal(val) - lower := strings.ToLower(key) - if key != lower { - // remove old key (not lower-cased) - delete(m, key) - } - // update map - m[lower] = val - } -} - -func insensitiveArray(a []any) { - for i, val := range a { - a[i] = insensitiviseVal(val) - } -} - -func absPathify(logger *slog.Logger, inPath string) string { - logger.Info("trying to resolve absolute path", "path", inPath) - - if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) { - inPath = userHomeDir() + inPath[5:] - } - - inPath = os.ExpandEnv(inPath) - - if filepath.IsAbs(inPath) { - return filepath.Clean(inPath) - } - - p, err := filepath.Abs(inPath) - if err == nil { - return filepath.Clean(p) - } - - logger.Error(fmt.Errorf("could not discover absolute path: %w", err).Error()) - - return "" -} - -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -func userHomeDir() string { - if runtime.GOOS == "windows" { - home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") - if home == "" { - home = os.Getenv("USERPROFILE") - } - return home - } - return os.Getenv("HOME") -} - -func safeMul(a, b uint) uint { - c := a * b - if a > 1 && b > 1 && c/b != a { - return 0 - } - return c -} - -// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes. -func parseSizeInBytes(sizeStr string) uint { - sizeStr = strings.TrimSpace(sizeStr) - lastChar := len(sizeStr) - 1 - multiplier := uint(1) - - if lastChar > 0 { - if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' { - if lastChar > 1 { - switch unicode.ToLower(rune(sizeStr[lastChar-1])) { - case 'k': - multiplier = 1 << 10 - sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) - case 'm': - multiplier = 1 << 20 - sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) - case 'g': - multiplier = 1 << 30 - sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) - default: - multiplier = 1 - sizeStr = strings.TrimSpace(sizeStr[:lastChar]) - } - } - } - } - - size := cast.ToInt(sizeStr) - if size < 0 { - size = 0 - } - - return safeMul(uint(size), multiplier) -} - -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! 
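-//
-// For example, deepSearch(m, []string{"a", "b"}) returns m["a"]["b"] as a map,
-// creating (or replacing) intermediate levels as needed.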
-func deepSearch(m map[string]any, path []string) map[string]any { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]any) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]any) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]any) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go deleted file mode 100644 index 20eb4da177..0000000000 --- a/vendor/github.com/spf13/viper/viper.go +++ /dev/null @@ -1,2258 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Viper is an application configuration system. -// It believes that applications can be configured a variety of ways -// via flags, ENVIRONMENT variables, configuration files retrieved -// from the file system, or a remote key/value store. - -// Each item takes precedence over the item below it: - -// overrides -// flag -// env -// config -// key/value store -// default - -package viper - -import ( - "bytes" - "encoding/csv" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/fsnotify/fsnotify" - "github.com/mitchellh/mapstructure" - slog "github.com/sagikazarmark/slog-shim" - "github.com/spf13/afero" - "github.com/spf13/cast" - "github.com/spf13/pflag" - - "github.com/spf13/viper/internal/encoding" - "github.com/spf13/viper/internal/encoding/dotenv" - "github.com/spf13/viper/internal/encoding/hcl" - "github.com/spf13/viper/internal/encoding/ini" - "github.com/spf13/viper/internal/encoding/javaproperties" - "github.com/spf13/viper/internal/encoding/json" - "github.com/spf13/viper/internal/encoding/toml" - "github.com/spf13/viper/internal/encoding/yaml" - "github.com/spf13/viper/internal/features" -) - -// ConfigMarshalError happens when failing to marshal the configuration. -type ConfigMarshalError struct { - err error -} - -// Error returns the formatted configuration error. -func (e ConfigMarshalError) Error() string { - return fmt.Sprintf("While marshaling config: %s", e.err.Error()) -} - -var v *Viper - -type RemoteResponse struct { - Value []byte - Error error -} - -func init() { - v = New() -} - -type remoteConfigFactory interface { - Get(rp RemoteProvider) (io.Reader, error) - Watch(rp RemoteProvider) (io.Reader, error) - WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) -} - -// RemoteConfig is optional, see the remote package. -var RemoteConfig remoteConfigFactory - -// UnsupportedConfigError denotes encountering an unsupported -// configuration filetype. -type UnsupportedConfigError string - -// Error returns the formatted configuration error. -func (str UnsupportedConfigError) Error() string { - return fmt.Sprintf("Unsupported Config Type %q", string(str)) -} - -// UnsupportedRemoteProviderError denotes encountering an unsupported remote -// provider. Currently only etcd and Consul are supported. -type UnsupportedRemoteProviderError string - -// Error returns the formatted remote provider error. 
-func (str UnsupportedRemoteProviderError) Error() string { - return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) -} - -// RemoteConfigError denotes encountering an error while trying to -// pull the configuration from the remote provider. -type RemoteConfigError string - -// Error returns the formatted remote provider error. -func (rce RemoteConfigError) Error() string { - return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) -} - -// ConfigFileNotFoundError denotes failing to find configuration file. -type ConfigFileNotFoundError struct { - name, locations string -} - -// Error returns the formatted configuration error. -func (fnfe ConfigFileNotFoundError) Error() string { - return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations) -} - -// ConfigFileAlreadyExistsError denotes failure to write new configuration file. -type ConfigFileAlreadyExistsError string - -// Error returns the formatted error when configuration already exists. -func (faee ConfigFileAlreadyExistsError) Error() string { - return fmt.Sprintf("Config File %q Already Exists", string(faee)) -} - -// A DecoderConfigOption can be passed to viper.Unmarshal to configure -// mapstructure.DecoderConfig options. -type DecoderConfigOption func(*mapstructure.DecoderConfig) - -// DecodeHook returns a DecoderConfigOption which overrides the default -// DecoderConfig.DecodeHook value, the default is: -// -// mapstructure.ComposeDecodeHookFunc( -// mapstructure.StringToTimeDurationHookFunc(), -// mapstructure.StringToSliceHookFunc(","), -// ) -func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { - return func(c *mapstructure.DecoderConfig) { - c.DecodeHook = hook - } -} - -// Viper is a prioritized configuration registry. It -// maintains a set of configuration sources, fetches -// values to populate those, and provides them according -// to the source's priority. -// The priority of the sources is the following: -// 1. overrides -// 2. flags -// 3. env. variables -// 4. config file -// 5. key/value store -// 6. defaults -// -// For example, if values from the following sources were loaded: -// -// Defaults : { -// "secret": "", -// "user": "default", -// "endpoint": "https://localhost" -// } -// Config : { -// "user": "root" -// "secret": "defaultsecret" -// } -// Env : { -// "secret": "somesecretkey" -// } -// -// The resulting config will have the following values: -// -// { -// "secret": "somesecretkey", -// "user": "root", -// "endpoint": "https://localhost" -// } -// -// Note: Vipers are not safe for concurrent Get() and Set() operations. -type Viper struct { - // Delimiter that separates a list of keys - // used to access a nested value in one go - keyDelim string - - // A set of paths to look for the config file in - configPaths []string - - // The filesystem to read config from. 
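-	// Defaults to the OS filesystem (afero.NewOsFs()); tests can substitute an in-memory afero.Fs.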
-	fs afero.Fs
-
-	// A set of remote providers to search for the configuration
-	remoteProviders []*defaultRemoteProvider
-
-	// Name of file to look for inside the path
-	configName        string
-	configFile        string
-	configType        string
-	configPermissions os.FileMode
-	envPrefix         string
-
-	// Specific commands for ini parsing
-	iniLoadOptions ini.LoadOptions
-
-	automaticEnvApplied bool
-	envKeyReplacer      StringReplacer
-	allowEmptyEnv       bool
-
-	parents        []string
-	config         map[string]any
-	override       map[string]any
-	defaults       map[string]any
-	kvstore        map[string]any
-	pflags         map[string]FlagValue
-	env            map[string][]string
-	aliases        map[string]string
-	typeByDefValue bool
-
-	onConfigChange func(fsnotify.Event)
-
-	logger *slog.Logger
-
-	// TODO: should probably be protected with a mutex
-	encoderRegistry *encoding.EncoderRegistry
-	decoderRegistry *encoding.DecoderRegistry
-}
-
-// New returns an initialized Viper instance.
-func New() *Viper {
-	v := new(Viper)
-	v.keyDelim = "."
-	v.configName = "config"
-	v.configPermissions = os.FileMode(0o644)
-	v.fs = afero.NewOsFs()
-	v.config = make(map[string]any)
-	v.parents = []string{}
-	v.override = make(map[string]any)
-	v.defaults = make(map[string]any)
-	v.kvstore = make(map[string]any)
-	v.pflags = make(map[string]FlagValue)
-	v.env = make(map[string][]string)
-	v.aliases = make(map[string]string)
-	v.typeByDefValue = false
-	v.logger = slog.New(&discardHandler{})
-
-	v.resetEncoding()
-
-	return v
-}
-
-// Option configures Viper using the functional options paradigm popularized by Rob Pike and Dave Cheney.
-// If you're unfamiliar with this style,
-// see https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html and
-// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis.
-type Option interface {
-	apply(v *Viper)
-}
-
-type optionFunc func(v *Viper)
-
-func (fn optionFunc) apply(v *Viper) {
-	fn(v)
-}
-
-// KeyDelimiter sets the delimiter used for determining key parts.
-// By default its value is ".".
-func KeyDelimiter(d string) Option {
-	return optionFunc(func(v *Viper) {
-		v.keyDelim = d
-	})
-}
-
-// StringReplacer applies a set of replacements to a string.
-type StringReplacer interface {
-	// Replace returns a copy of s with all replacements performed.
-	Replace(s string) string
-}
-
-// EnvKeyReplacer sets a replacer used for mapping environment variables to internal keys.
-func EnvKeyReplacer(r StringReplacer) Option {
-	return optionFunc(func(v *Viper) {
-		v.envKeyReplacer = r
-	})
-}
-
-// NewWithOptions creates a new Viper instance.
-func NewWithOptions(opts ...Option) *Viper {
-	v := New()
-
-	for _, opt := range opts {
-		opt.apply(v)
-	}
-
-	v.resetEncoding()
-
-	return v
-}
-
-// Reset is intended for testing; it resets all settings to their defaults.
-// It is in the public interface for the viper package so applications
-// can use it in their testing as well.
-func Reset() {
-	v = New()
-	SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"}
-	SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"}
-}
-
-// TODO: make this lazy initialization instead.
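-// resetEncoding rebuilds the encoder and decoder registries, registering a
-// codec for every supported format (and every alias, e.g. "yml" for YAML).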
-func (v *Viper) resetEncoding() { - encoderRegistry := encoding.NewEncoderRegistry() - decoderRegistry := encoding.NewDecoderRegistry() - - { - codec := yaml.Codec{} - - encoderRegistry.RegisterEncoder("yaml", codec) - decoderRegistry.RegisterDecoder("yaml", codec) - - encoderRegistry.RegisterEncoder("yml", codec) - decoderRegistry.RegisterDecoder("yml", codec) - } - - { - codec := json.Codec{} - - encoderRegistry.RegisterEncoder("json", codec) - decoderRegistry.RegisterDecoder("json", codec) - } - - { - codec := toml.Codec{} - - encoderRegistry.RegisterEncoder("toml", codec) - decoderRegistry.RegisterDecoder("toml", codec) - } - - { - codec := hcl.Codec{} - - encoderRegistry.RegisterEncoder("hcl", codec) - decoderRegistry.RegisterDecoder("hcl", codec) - - encoderRegistry.RegisterEncoder("tfvars", codec) - decoderRegistry.RegisterDecoder("tfvars", codec) - } - - { - codec := ini.Codec{ - KeyDelimiter: v.keyDelim, - LoadOptions: v.iniLoadOptions, - } - - encoderRegistry.RegisterEncoder("ini", codec) - decoderRegistry.RegisterDecoder("ini", codec) - } - - { - codec := &javaproperties.Codec{ - KeyDelimiter: v.keyDelim, - } - - encoderRegistry.RegisterEncoder("properties", codec) - decoderRegistry.RegisterDecoder("properties", codec) - - encoderRegistry.RegisterEncoder("props", codec) - decoderRegistry.RegisterDecoder("props", codec) - - encoderRegistry.RegisterEncoder("prop", codec) - decoderRegistry.RegisterDecoder("prop", codec) - } - - { - codec := &dotenv.Codec{} - - encoderRegistry.RegisterEncoder("dotenv", codec) - decoderRegistry.RegisterDecoder("dotenv", codec) - - encoderRegistry.RegisterEncoder("env", codec) - decoderRegistry.RegisterDecoder("env", codec) - } - - v.encoderRegistry = encoderRegistry - v.decoderRegistry = decoderRegistry -} - -type defaultRemoteProvider struct { - provider string - endpoint string - path string - secretKeyring string -} - -func (rp defaultRemoteProvider) Provider() string { - return rp.provider -} - -func (rp defaultRemoteProvider) Endpoint() string { - return rp.endpoint -} - -func (rp defaultRemoteProvider) Path() string { - return rp.path -} - -func (rp defaultRemoteProvider) SecretKeyring() string { - return rp.secretKeyring -} - -// RemoteProvider stores the configuration necessary -// to connect to a remote key/value store. -// Optional secretKeyring to unencrypt encrypted values -// can be provided. -type RemoteProvider interface { - Provider() string - Endpoint() string - Path() string - SecretKeyring() string -} - -// SupportedExts are universally supported extensions. -var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} - -// SupportedRemoteProviders are universally supported remote providers. -var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} - -// OnConfigChange sets the event handler that is called when a config file changes. -func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } - -// OnConfigChange sets the event handler that is called when a config file changes. -func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { - v.onConfigChange = run -} - -// WatchConfig starts watching a config file for changes. -func WatchConfig() { v.WatchConfig() } - -// WatchConfig starts watching a config file for changes. 
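A sketch of the file-watching entry points declared above (`OnConfigChange` plus `WatchConfig`); the config path is hypothetical, and the handler should be registered before the watcher starts:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
	"github.com/spf13/viper"
)

func main() {
	viper.SetConfigFile("/etc/myapp/config.yaml") // hypothetical path
	if err := viper.ReadInConfig(); err != nil {
		log.Fatal(err)
	}

	// Register the handler first, then start the watcher.
	viper.OnConfigChange(func(e fsnotify.Event) {
		log.Println("config file changed:", e.Name)
	})
	viper.WatchConfig()

	select {} // block so the watcher keeps running in this demo
}
```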
-func (v *Viper) WatchConfig() { - initWG := sync.WaitGroup{} - initWG.Add(1) - go func() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - v.logger.Error(fmt.Sprintf("failed to create watcher: %s", err)) - os.Exit(1) - } - defer watcher.Close() - // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way - filename, err := v.getConfigFile() - if err != nil { - v.logger.Error(fmt.Sprintf("get config file: %s", err)) - initWG.Done() - return - } - - configFile := filepath.Clean(filename) - configDir, _ := filepath.Split(configFile) - realConfigFile, _ := filepath.EvalSymlinks(filename) - - eventsWG := sync.WaitGroup{} - eventsWG.Add(1) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { // 'Events' channel is closed - eventsWG.Done() - return - } - currentConfigFile, _ := filepath.EvalSymlinks(filename) - // we only care about the config file with the following cases: - // 1 - if the config file was modified or created - // 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement) - if (filepath.Clean(event.Name) == configFile && - (event.Has(fsnotify.Write) || event.Has(fsnotify.Create))) || - (currentConfigFile != "" && currentConfigFile != realConfigFile) { - realConfigFile = currentConfigFile - err := v.ReadInConfig() - if err != nil { - v.logger.Error(fmt.Sprintf("read config file: %s", err)) - } - if v.onConfigChange != nil { - v.onConfigChange(event) - } - } else if filepath.Clean(event.Name) == configFile && event.Has(fsnotify.Remove) { - eventsWG.Done() - return - } - - case err, ok := <-watcher.Errors: - if ok { // 'Errors' channel is not closed - v.logger.Error(fmt.Sprintf("watcher error: %s", err)) - } - eventsWG.Done() - return - } - } - }() - watcher.Add(configDir) - initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on... - eventsWG.Wait() // now, wait for event loop to end in this go-routine... - }() - initWG.Wait() // make sure that the go routine above fully ended before returning -} - -// SetConfigFile explicitly defines the path, name and extension of the config file. -// Viper will use this and not check any of the config paths. -func SetConfigFile(in string) { v.SetConfigFile(in) } - -func (v *Viper) SetConfigFile(in string) { - if in != "" { - v.configFile = in - } -} - -// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use. -// E.g. if your prefix is "spf", the env registry will look for env -// variables that start with "SPF_". -func SetEnvPrefix(in string) { v.SetEnvPrefix(in) } - -func (v *Viper) SetEnvPrefix(in string) { - if in != "" { - v.envPrefix = in - } -} - -func GetEnvPrefix() string { return v.GetEnvPrefix() } - -func (v *Viper) GetEnvPrefix() string { - return v.envPrefix -} - -func (v *Viper) mergeWithEnvPrefix(in string) string { - if v.envPrefix != "" { - return strings.ToUpper(v.envPrefix + "_" + in) - } - - return strings.ToUpper(in) -} - -// AllowEmptyEnv tells Viper to consider set, -// but empty environment variables as valid values instead of falling back. -// For backward compatibility reasons this is false by default. -func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) } - -func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) { - v.allowEmptyEnv = allowEmptyEnv -} - -// TODO: should getEnv logic be moved into find(). 
Can generalize the use of -// rewriting keys many things, Ex: Get('someKey') -> some_key -// (camel case to snake case for JSON keys perhaps) - -// getEnv is a wrapper around os.Getenv which replaces characters in the original -// key. This allows env vars which have different keys than the config object -// keys. -func (v *Viper) getEnv(key string) (string, bool) { - if v.envKeyReplacer != nil { - key = v.envKeyReplacer.Replace(key) - } - - val, ok := os.LookupEnv(key) - - return val, ok && (v.allowEmptyEnv || val != "") -} - -// ConfigFileUsed returns the file used to populate the config registry. -func ConfigFileUsed() string { return v.ConfigFileUsed() } -func (v *Viper) ConfigFileUsed() string { return v.configFile } - -// AddConfigPath adds a path for Viper to search for the config file in. -// Can be called multiple times to define multiple search paths. -func AddConfigPath(in string) { v.AddConfigPath(in) } - -func (v *Viper) AddConfigPath(in string) { - if in != "" { - absin := absPathify(v.logger, in) - - v.logger.Info("adding path to search paths", "path", absin) - if !stringInSlice(absin, v.configPaths) { - v.configPaths = append(v.configPaths, absin) - } - } -} - -// AddRemoteProvider adds a remote configuration source. -// Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. -// endpoint is the url. etcd requires http://ip:port, consul requires ip:port, nats requires nats://ip:port -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp". -func AddRemoteProvider(provider, endpoint, path string) error { - return v.AddRemoteProvider(provider, endpoint, path) -} - -func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { - if !stringInSlice(provider, SupportedRemoteProviders) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) - - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -// AddSecureRemoteProvider adds a remote configuration source. -// Secure Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. -// endpoint is the url. etcd requires http://ip:port consul requires ip:port -// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp". -// Secure Remote Providers are implemented with github.com/bketelsen/crypt. 
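Putting `AddRemoteProvider` together with the read path, a sketch under the assumption of a reachable etcd instance (the endpoint and key path are placeholders):

```go
package main

import (
	"log"

	"github.com/spf13/viper"
	// Blank import wires up the RemoteConfig implementation.
	_ "github.com/spf13/viper/remote"
)

func main() {
	// Placeholder endpoint and key path.
	if err := viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/myapp.json"); err != nil {
		log.Fatal(err)
	}
	viper.SetConfigType("json") // remote payloads carry no file extension

	if err := viper.ReadRemoteConfig(); err != nil {
		log.Fatal(err)
	}
	log.Println(viper.Get("port"))
}
```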
-func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) -} - -func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - if !stringInSlice(provider, SupportedRemoteProviders) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) - - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - secretKeyring: secretkeyring, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { - for _, y := range v.remoteProviders { - if reflect.DeepEqual(y, p) { - return true - } - } - return false -} - -// searchMap recursively searches for a value for path in source map. -// Returns nil if not found. -// Note: This assumes that the path entries and map keys are lower cased. -func (v *Viper) searchMap(source map[string]any, path []string) any { - if len(path) == 0 { - return source - } - - next, ok := source[path[0]] - if ok { - // Fast path - if len(path) == 1 { - return next - } - - // Nested case - switch next := next.(type) { - case map[any]any: - return v.searchMap(cast.ToStringMap(next), path[1:]) - case map[string]any: - // Type assertion is safe here since it is only reached - // if the type of `next` is the same as the type being asserted - return v.searchMap(next, path[1:]) - default: - // got a value but nested key expected, return "nil" for not found - return nil - } - } - return nil -} - -// searchIndexableWithPathPrefixes recursively searches for a value for path in source map/slice. -// -// While searchMap() considers each path element as a single map key or slice index, this -// function searches for, and prioritizes, merged path elements. -// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar" -// is also defined, this latter value is returned for path ["foo", "bar"]. -// -// This should be useful only at config level (other maps may not contain dots -// in their keys). -// -// Note: This assumes that the path entries and map keys are lower cased. -func (v *Viper) searchIndexableWithPathPrefixes(source any, path []string) any { - if len(path) == 0 { - return source - } - - // search for path prefixes, starting from the longest one - for i := len(path); i > 0; i-- { - prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) - - var val any - switch sourceIndexable := source.(type) { - case []any: - val = v.searchSliceWithPathPrefixes(sourceIndexable, prefixKey, i, path) - case map[string]any: - val = v.searchMapWithPathPrefixes(sourceIndexable, prefixKey, i, path) - } - if val != nil { - return val - } - } - - // not found - return nil -} - -// searchSliceWithPathPrefixes searches for a value for path in sourceSlice -// -// This function is part of the searchIndexableWithPathPrefixes recurring search and -// should not be called directly from functions other than searchIndexableWithPathPrefixes. 
-func (v *Viper) searchSliceWithPathPrefixes( - sourceSlice []any, - prefixKey string, - pathIndex int, - path []string, -) any { - // if the prefixKey is not a number or it is out of bounds of the slice - index, err := strconv.Atoi(prefixKey) - if err != nil || len(sourceSlice) <= index { - return nil - } - - next := sourceSlice[index] - - // Fast path - if pathIndex == len(path) { - return next - } - - switch n := next.(type) { - case map[any]any: - return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) - case map[string]any, []any: - return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) - default: - // got a value but nested key expected, do nothing and look for next prefix - } - - // not found - return nil -} - -// searchMapWithPathPrefixes searches for a value for path in sourceMap -// -// This function is part of the searchIndexableWithPathPrefixes recurring search and -// should not be called directly from functions other than searchIndexableWithPathPrefixes. -func (v *Viper) searchMapWithPathPrefixes( - sourceMap map[string]any, - prefixKey string, - pathIndex int, - path []string, -) any { - next, ok := sourceMap[prefixKey] - if !ok { - return nil - } - - // Fast path - if pathIndex == len(path) { - return next - } - - // Nested case - switch n := next.(type) { - case map[any]any: - return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) - case map[string]any, []any: - return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) - default: - // got a value but nested key expected, do nothing and look for next prefix - } - - // not found - return nil -} - -// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere -// on its path in the map. -// e.g., if "foo.bar" has a value in the given map, it “shadows” -// -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]any) string { - var parentVal any - for i := 1; i < len(path); i++ { - parentVal = v.searchMap(m, path[0:i]) - if parentVal == nil { - // not found, no need to add more path elements - return "" - } - switch parentVal.(type) { - case map[any]any: - continue - case map[string]any: - continue - default: - // parentVal is a regular value which shadows "path" - return strings.Join(path[0:i], v.keyDelim) - } - } - return "" -} - -// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere -// in a sub-path of the map. -// e.g., if "foo.bar" has a value in the given map, it “shadows” -// -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInFlatMap(path []string, mi any) string { - // unify input map - var m map[string]interface{} - switch miv := mi.(type) { - case map[string]string: - m = castMapStringToMapInterface(miv) - case map[string]FlagValue: - m = castMapFlagToMapInterface(miv) - default: - return "" - } - - // scan paths - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if _, ok := m[parentKey]; ok { - return parentKey - } - } - return "" -} - -// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere -// in the environment, when automatic env is on. 
-// e.g., if "foo.bar" has a value in the environment, it “shadows” -// -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInAutoEnv(path []string) string { - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if _, ok := v.getEnv(v.mergeWithEnvPrefix(parentKey)); ok { - return parentKey - } - } - return "" -} - -// SetTypeByDefaultValue enables or disables the inference of a key value's -// type when the Get function is used based upon a key's default value as -// opposed to the value returned based on the normal fetch logic. -// -// For example, if a key has a default value of []string{} and the same key -// is set via an environment variable to "a b c", a call to the Get function -// would return a string slice for the key if the key's type is inferred by -// the default value and the Get function would return: -// -// []string {"a", "b", "c"} -// -// Otherwise the Get function would return: -// -// "a b c" -func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) } - -func (v *Viper) SetTypeByDefaultValue(enable bool) { - v.typeByDefValue = enable -} - -// GetViper gets the global Viper instance. -func GetViper() *Viper { - return v -} - -// Get can retrieve any value given the key to use. -// Get is case-insensitive for a key. -// Get has the behavior of returning the value associated with the first -// place from where it is set. Viper will check in the following order: -// override, flag, env, config file, key/value store, default -// -// Get returns an interface. For a specific value use one of the Get____ methods. -func Get(key string) any { return v.Get(key) } - -func (v *Viper) Get(key string) any { - lcaseKey := strings.ToLower(key) - val := v.find(lcaseKey, true) - if val == nil { - return nil - } - - if v.typeByDefValue { - // TODO(bep) this branch isn't covered by a single test. - valType := val - path := strings.Split(lcaseKey, v.keyDelim) - defVal := v.searchMap(v.defaults, path) - if defVal != nil { - valType = defVal - } - - switch valType.(type) { - case bool: - return cast.ToBool(val) - case string: - return cast.ToString(val) - case int32, int16, int8, int: - return cast.ToInt(val) - case uint: - return cast.ToUint(val) - case uint32: - return cast.ToUint32(val) - case uint64: - return cast.ToUint64(val) - case int64: - return cast.ToInt64(val) - case float64, float32: - return cast.ToFloat64(val) - case time.Time: - return cast.ToTime(val) - case time.Duration: - return cast.ToDuration(val) - case []string: - return cast.ToStringSlice(val) - case []int: - return cast.ToIntSlice(val) - case []time.Duration: - return cast.ToDurationSlice(val) - } - } - - return val -} - -// Sub returns new Viper instance representing a sub tree of this instance. -// Sub is case-insensitive for a key. -func Sub(key string) *Viper { return v.Sub(key) } - -func (v *Viper) Sub(key string) *Viper { - subv := New() - data := v.Get(key) - if data == nil { - return nil - } - - if reflect.TypeOf(data).Kind() == reflect.Map { - subv.parents = append([]string(nil), v.parents...) - subv.parents = append(subv.parents, strings.ToLower(key)) - subv.automaticEnvApplied = v.automaticEnvApplied - subv.envPrefix = v.envPrefix - subv.envKeyReplacer = v.envKeyReplacer - subv.config = cast.ToStringMap(data) - return subv - } - return nil -} - -// GetString returns the value associated with the key as a string. 
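`Sub` above returns `nil` unless the key resolves to a map, so callers must check before use; a small sketch with hypothetical key names:

```go
package example

import (
	"time"

	"github.com/spf13/viper"
)

// cacheTTL reads "cache.ttl" through a sub-tree view of the config.
func cacheTTL() time.Duration {
	cache := viper.Sub("cache")
	if cache == nil {
		// Key missing or not a map: fall back to a hypothetical default.
		return time.Minute
	}
	return cache.GetDuration("ttl")
}
```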
-func GetString(key string) string { return v.GetString(key) } - -func (v *Viper) GetString(key string) string { - return cast.ToString(v.Get(key)) -} - -// GetBool returns the value associated with the key as a boolean. -func GetBool(key string) bool { return v.GetBool(key) } - -func (v *Viper) GetBool(key string) bool { - return cast.ToBool(v.Get(key)) -} - -// GetInt returns the value associated with the key as an integer. -func GetInt(key string) int { return v.GetInt(key) } - -func (v *Viper) GetInt(key string) int { - return cast.ToInt(v.Get(key)) -} - -// GetInt32 returns the value associated with the key as an integer. -func GetInt32(key string) int32 { return v.GetInt32(key) } - -func (v *Viper) GetInt32(key string) int32 { - return cast.ToInt32(v.Get(key)) -} - -// GetInt64 returns the value associated with the key as an integer. -func GetInt64(key string) int64 { return v.GetInt64(key) } - -func (v *Viper) GetInt64(key string) int64 { - return cast.ToInt64(v.Get(key)) -} - -// GetUint returns the value associated with the key as an unsigned integer. -func GetUint(key string) uint { return v.GetUint(key) } - -func (v *Viper) GetUint(key string) uint { - return cast.ToUint(v.Get(key)) -} - -// GetUint16 returns the value associated with the key as an unsigned integer. -func GetUint16(key string) uint16 { return v.GetUint16(key) } - -func (v *Viper) GetUint16(key string) uint16 { - return cast.ToUint16(v.Get(key)) -} - -// GetUint32 returns the value associated with the key as an unsigned integer. -func GetUint32(key string) uint32 { return v.GetUint32(key) } - -func (v *Viper) GetUint32(key string) uint32 { - return cast.ToUint32(v.Get(key)) -} - -// GetUint64 returns the value associated with the key as an unsigned integer. -func GetUint64(key string) uint64 { return v.GetUint64(key) } - -func (v *Viper) GetUint64(key string) uint64 { - return cast.ToUint64(v.Get(key)) -} - -// GetFloat64 returns the value associated with the key as a float64. -func GetFloat64(key string) float64 { return v.GetFloat64(key) } - -func (v *Viper) GetFloat64(key string) float64 { - return cast.ToFloat64(v.Get(key)) -} - -// GetTime returns the value associated with the key as time. -func GetTime(key string) time.Time { return v.GetTime(key) } - -func (v *Viper) GetTime(key string) time.Time { - return cast.ToTime(v.Get(key)) -} - -// GetDuration returns the value associated with the key as a duration. -func GetDuration(key string) time.Duration { return v.GetDuration(key) } - -func (v *Viper) GetDuration(key string) time.Duration { - return cast.ToDuration(v.Get(key)) -} - -// GetIntSlice returns the value associated with the key as a slice of int values. -func GetIntSlice(key string) []int { return v.GetIntSlice(key) } - -func (v *Viper) GetIntSlice(key string) []int { - return cast.ToIntSlice(v.Get(key)) -} - -// GetStringSlice returns the value associated with the key as a slice of strings. -func GetStringSlice(key string) []string { return v.GetStringSlice(key) } - -func (v *Viper) GetStringSlice(key string) []string { - return cast.ToStringSlice(v.Get(key)) -} - -// GetStringMap returns the value associated with the key as a map of interfaces. -func GetStringMap(key string) map[string]any { return v.GetStringMap(key) } - -func (v *Viper) GetStringMap(key string) map[string]any { - return cast.ToStringMap(v.Get(key)) -} - -// GetStringMapString returns the value associated with the key as a map of strings. 
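The typed getters above all funnel through `Get` and the `cast` helpers, so string-typed sources convert at read time; for instance (values illustrative):

```go
package example

import (
	"time"

	"github.com/spf13/viper"
)

func typedReads() (time.Duration, []int) {
	viper.Set("timeout", "5s") // stored as a string
	viper.Set("ports", []int{8080, 8081})

	// Conversion happens on read via the cast package.
	return viper.GetDuration("timeout"), viper.GetIntSlice("ports")
}
```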
-func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) } - -func (v *Viper) GetStringMapString(key string) map[string]string { - return cast.ToStringMapString(v.Get(key)) -} - -// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. -func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) } - -func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { - return cast.ToStringMapStringSlice(v.Get(key)) -} - -// GetSizeInBytes returns the size of the value associated with the given key -// in bytes. -func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) } - -func (v *Viper) GetSizeInBytes(key string) uint { - sizeStr := cast.ToString(v.Get(key)) - return parseSizeInBytes(sizeStr) -} - -// UnmarshalKey takes a single key and unmarshals it into a Struct. -func UnmarshalKey(key string, rawVal any, opts ...DecoderConfigOption) error { - return v.UnmarshalKey(key, rawVal, opts...) -} - -func (v *Viper) UnmarshalKey(key string, rawVal any, opts ...DecoderConfigOption) error { - return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...)) -} - -// Unmarshal unmarshals the config into a Struct. Make sure that the tags -// on the fields of the structure are properly set. -func Unmarshal(rawVal any, opts ...DecoderConfigOption) error { - return v.Unmarshal(rawVal, opts...) -} - -func (v *Viper) Unmarshal(rawVal any, opts ...DecoderConfigOption) error { - keys := v.AllKeys() - - if features.BindStruct { - // TODO: make this optional? - structKeys, err := v.decodeStructKeys(rawVal, opts...) - if err != nil { - return err - } - - keys = append(keys, structKeys...) - } - - // TODO: struct keys should be enough? - return decode(v.getSettings(keys), defaultDecoderConfig(rawVal, opts...)) -} - -func (v *Viper) decodeStructKeys(input any, opts ...DecoderConfigOption) ([]string, error) { - var structKeyMap map[string]any - - err := decode(input, defaultDecoderConfig(&structKeyMap, opts...)) - if err != nil { - return nil, err - } - - flattenedStructKeyMap := v.flattenAndMergeMap(map[string]bool{}, structKeyMap, "") - - r := make([]string, 0, len(flattenedStructKeyMap)) - for v := range flattenedStructKeyMap { - r = append(r, v) - } - - return r, nil -} - -// defaultDecoderConfig returns default mapstructure.DecoderConfig with support -// of time.Duration values & string slices. -func defaultDecoderConfig(output any, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { - c := &mapstructure.DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - DecodeHook: mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - ), - } - for _, opt := range opts { - opt(c) - } - return c -} - -// decode is a wrapper around mapstructure.Decode that mimics the WeakDecode functionality. -func decode(input any, config *mapstructure.DecoderConfig) error { - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - return decoder.Decode(input) -} - -// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent -// in the destination struct. -func UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { - return v.UnmarshalExact(rawVal, opts...) -} - -func (v *Viper) UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { - config := defaultDecoderConfig(rawVal, opts...) 
- config.ErrorUnused = true - - keys := v.AllKeys() - - if features.BindStruct { - // TODO: make this optional? - structKeys, err := v.decodeStructKeys(rawVal, opts...) - if err != nil { - return err - } - - keys = append(keys, structKeys...) - } - - // TODO: struct keys should be enough? - return decode(v.getSettings(keys), config) -} - -// BindPFlags binds a full flag set to the configuration, using each flag's long -// name as the config key. -func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) } - -func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { - return v.BindFlagValues(pflagValueSet{flags}) -} - -// BindPFlag binds a specific key to a pflag (as used by cobra). -// Example (where serverCmd is a Cobra instance): -// -// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") -// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) -func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } - -func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { - if flag == nil { - return fmt.Errorf("flag for %q is nil", key) - } - return v.BindFlagValue(key, pflagValue{flag}) -} - -// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long -// name as the config key. -func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) } - -func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { - flags.VisitAll(func(flag FlagValue) { - if err = v.BindFlagValue(flag.Name(), flag); err != nil { - return - } - }) - return nil -} - -// BindFlagValue binds a specific key to a FlagValue. -func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) } - -func (v *Viper) BindFlagValue(key string, flag FlagValue) error { - if flag == nil { - return fmt.Errorf("flag for %q is nil", key) - } - v.pflags[strings.ToLower(key)] = flag - return nil -} - -// BindEnv binds a Viper key to a ENV variable. -// ENV variables are case sensitive. -// If only a key is provided, it will use the env key matching the key, uppercased. -// If more arguments are provided, they will represent the env variable names that -// should bind to this key and will be taken in the specified order. -// EnvPrefix will be used when set when env name is not provided. -func BindEnv(input ...string) error { return v.BindEnv(input...) } - -func (v *Viper) BindEnv(input ...string) error { - if len(input) == 0 { - return fmt.Errorf("missing key to bind to") - } - - key := strings.ToLower(input[0]) - - if len(input) == 1 { - v.env[key] = append(v.env[key], v.mergeWithEnvPrefix(key)) - } else { - v.env[key] = append(v.env[key], input[1:]...) - } - - return nil -} - -// MustBindEnv wraps BindEnv in a panic. -// If there is an error binding an environment variable, MustBindEnv will -// panic. -func MustBindEnv(input ...string) { v.MustBindEnv(input...) } - -func (v *Viper) MustBindEnv(input ...string) { - if err := v.BindEnv(input...); err != nil { - panic(fmt.Sprintf("error while binding environment variable: %v", err)) - } -} - -// Given a key, find the value. -// -// Viper will check to see if an alias exists first. -// Viper will then check in the following order: -// flag, env, config file, key/value store. -// Lastly, if no value was found and flagDefault is true, and if the key -// corresponds to a flag, the flag's default value is returned. -// -// Note: this assumes a lower-cased key given. 
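A sketch combining the binding helpers above with `Unmarshal`; the flag set and env var name are illustrative, and the `mapstructure` tag mirrors the decoder config shown earlier:

```go
package example

import (
	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

type serverConfig struct {
	Port int `mapstructure:"port"`
}

func bindAndDecode(flags *pflag.FlagSet) (*serverConfig, error) {
	flags.Int("port", 1138, "Port to run Application server on")
	if err := viper.BindPFlag("port", flags.Lookup("port")); err != nil {
		return nil, err
	}

	// Bind an explicit environment variable name to a key.
	if err := viper.BindEnv("id", "APP_ID"); err != nil {
		return nil, err
	}

	var c serverConfig
	if err := viper.Unmarshal(&c); err != nil {
		return nil, err
	}
	return &c, nil
}
```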
-func (v *Viper) find(lcaseKey string, flagDefault bool) any { - var ( - val any - exists bool - path = strings.Split(lcaseKey, v.keyDelim) - nested = len(path) > 1 - ) - - // compute the path through the nested maps to the nested value - if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" { - return nil - } - - // if the requested key is an alias, then return the proper key - lcaseKey = v.realKey(lcaseKey) - path = strings.Split(lcaseKey, v.keyDelim) - nested = len(path) > 1 - - // Set() override first - val = v.searchMap(v.override, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.override) != "" { - return nil - } - - // PFlag override next - flag, exists := v.pflags[lcaseKey] - if exists && flag.HasChanged() { - switch flag.ValueType() { - case "int", "int8", "int16", "int32", "int64": - return cast.ToInt(flag.ValueString()) - case "bool": - return cast.ToBool(flag.ValueString()) - case "stringSlice", "stringArray": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return res - case "intSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return cast.ToIntSlice(res) - case "durationSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - slice := strings.Split(s, ",") - return cast.ToDurationSlice(slice) - case "stringToString": - return stringToStringConv(flag.ValueString()) - case "stringToInt": - return stringToIntConv(flag.ValueString()) - default: - return flag.ValueString() - } - } - if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" { - return nil - } - - // Env override next - if v.automaticEnvApplied { - envKey := strings.Join(append(v.parents, lcaseKey), ".") - // even if it hasn't been registered, if automaticEnv is used, - // check any Get request - if val, ok := v.getEnv(v.mergeWithEnvPrefix(envKey)); ok { - return val - } - if nested && v.isPathShadowedInAutoEnv(path) != "" { - return nil - } - } - envkeys, exists := v.env[lcaseKey] - if exists { - for _, envkey := range envkeys { - if val, ok := v.getEnv(envkey); ok { - return val - } - } - } - if nested && v.isPathShadowedInFlatMap(path, v.env) != "" { - return nil - } - - // Config file next - val = v.searchIndexableWithPathPrefixes(v.config, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.config) != "" { - return nil - } - - // K/V store next - val = v.searchMap(v.kvstore, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" { - return nil - } - - // Default next - val = v.searchMap(v.defaults, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" { - return nil - } - - if flagDefault { - // last chance: if no value is found and a flag does exist for the key, - // get the flag's default value even if the flag's value has not been set. 
- if flag, exists := v.pflags[lcaseKey]; exists { - switch flag.ValueType() { - case "int", "int8", "int16", "int32", "int64": - return cast.ToInt(flag.ValueString()) - case "bool": - return cast.ToBool(flag.ValueString()) - case "stringSlice", "stringArray": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return res - case "intSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return cast.ToIntSlice(res) - case "stringToString": - return stringToStringConv(flag.ValueString()) - case "stringToInt": - return stringToIntConv(flag.ValueString()) - case "durationSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - slice := strings.Split(s, ",") - return cast.ToDurationSlice(slice) - default: - return flag.ValueString() - } - } - // last item, no need to check shadowing - } - - return nil -} - -func readAsCSV(val string) ([]string, error) { - if val == "" { - return []string{}, nil - } - stringReader := strings.NewReader(val) - csvReader := csv.NewReader(stringReader) - return csvReader.Read() -} - -// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/master/string_to_string.go#L79 -// alterations are: errors are swallowed, map[string]any is returned in order to enable cast.ToStringMap. -func stringToStringConv(val string) any { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if val == "" { - return map[string]any{} - } - r := csv.NewReader(strings.NewReader(val)) - ss, err := r.Read() - if err != nil { - return nil - } - out := make(map[string]any, len(ss)) - for _, pair := range ss { - k, vv, found := strings.Cut(pair, "=") - if !found { - return nil - } - out[k] = vv - } - return out -} - -// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/d5e0c0615acee7028e1e2740a11102313be88de1/string_to_int.go#L68 -// alterations are: errors are swallowed, map[string]any is returned in order to enable cast.ToStringMap. -func stringToIntConv(val string) any { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if val == "" { - return map[string]any{} - } - ss := strings.Split(val, ",") - out := make(map[string]any, len(ss)) - for _, pair := range ss { - k, vv, found := strings.Cut(pair, "=") - if !found { - return nil - } - var err error - out[k], err = strconv.Atoi(vv) - if err != nil { - return nil - } - } - return out -} - -// IsSet checks to see if the key has been set in any of the data locations. -// IsSet is case-insensitive for a key. -func IsSet(key string) bool { return v.IsSet(key) } - -func (v *Viper) IsSet(key string) bool { - lcaseKey := strings.ToLower(key) - val := v.find(lcaseKey, false) - return val != nil -} - -// AutomaticEnv makes Viper check if environment variables match any of the existing keys -// (config, default or flags). If matching env vars are found, they are loaded into Viper. -func AutomaticEnv() { v.AutomaticEnv() } - -func (v *Viper) AutomaticEnv() { - v.automaticEnvApplied = true -} - -// SetEnvKeyReplacer sets the strings.Replacer on the viper object -// Useful for mapping an environmental variable to a key that does -// not match it. 
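`AutomaticEnv` above checks the environment on every lookup rather than at registration time; a sketch using the `SPF_` prefix from the `SetEnvPrefix` doc comment:

```go
package example

import (
	"os"

	"github.com/spf13/viper"
)

func portFromEnv() int {
	viper.SetEnvPrefix("spf") // keys are looked up as "SPF_<KEY>"
	viper.AutomaticEnv()

	os.Setenv("SPF_PORT", "8080") // illustrative; normally set by the caller
	return viper.GetInt("port")   // resolved from SPF_PORT at lookup time
}
```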
-func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) } - -func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) { - v.envKeyReplacer = r -} - -// RegisterAlias creates an alias that provides another accessor for the same key. -// This enables one to change a name without breaking the application. -func RegisterAlias(alias, key string) { v.RegisterAlias(alias, key) } - -func (v *Viper) RegisterAlias(alias, key string) { - v.registerAlias(alias, strings.ToLower(key)) -} - -func (v *Viper) registerAlias(alias, key string) { - alias = strings.ToLower(alias) - if alias != key && alias != v.realKey(key) { - _, exists := v.aliases[alias] - - if !exists { - // if we alias something that exists in one of the maps to another - // name, we'll never be able to get that value using the original - // name, so move the config value to the new realkey. - if val, ok := v.config[alias]; ok { - delete(v.config, alias) - v.config[key] = val - } - if val, ok := v.kvstore[alias]; ok { - delete(v.kvstore, alias) - v.kvstore[key] = val - } - if val, ok := v.defaults[alias]; ok { - delete(v.defaults, alias) - v.defaults[key] = val - } - if val, ok := v.override[alias]; ok { - delete(v.override, alias) - v.override[key] = val - } - v.aliases[alias] = key - } - } else { - v.logger.Warn("creating circular reference alias", "alias", alias, "key", key, "real_key", v.realKey(key)) - } -} - -func (v *Viper) realKey(key string) string { - newkey, exists := v.aliases[key] - if exists { - v.logger.Debug("key is an alias", "alias", key, "to", newkey) - - return v.realKey(newkey) - } - return key -} - -// InConfig checks to see if the given key (or an alias) is in the config file. -func InConfig(key string) bool { return v.InConfig(key) } - -func (v *Viper) InConfig(key string) bool { - lcaseKey := strings.ToLower(key) - - // if the requested key is an alias, then return the proper key - lcaseKey = v.realKey(lcaseKey) - path := strings.Split(lcaseKey, v.keyDelim) - - return v.searchIndexableWithPathPrefixes(v.config, path) != nil -} - -// SetDefault sets the default value for this key. -// SetDefault is case-insensitive for a key. -// Default only used when no value is provided by the user via flag, config or ENV. -func SetDefault(key string, value any) { v.SetDefault(key, value) } - -func (v *Viper) SetDefault(key string, value any) { - // If alias passed in, then set the proper default - key = v.realKey(strings.ToLower(key)) - value = toCaseInsensitiveValue(value) - - path := strings.Split(key, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v.defaults, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value -} - -// Set sets the value for the key in the override register. -// Set is case-insensitive for a key. -// Will be used instead of values obtained via -// flags, config file, ENV, default, or key/value store. -func Set(key string, value any) { v.Set(key, value) } - -func (v *Viper) Set(key string, value any) { - // If alias passed in, then set the proper override - key = v.realKey(strings.ToLower(key)) - value = toCaseInsensitiveValue(value) - - path := strings.Split(key, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v.override, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value -} - -// ReadInConfig will discover and load the configuration file from disk -// and key/value stores, searching in one of the defined paths. 
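The alias, default, and override registers above interact as follows; a minimal sketch with hypothetical keys:

```go
package example

import "github.com/spf13/viper"

func aliasExample() bool {
	viper.RegisterAlias("loud", "verbose")
	viper.SetDefault("verbose", false)
	viper.Set("verbose", true) // the override register outranks the default

	return viper.GetBool("loud") // true: the alias resolves to "verbose"
}
```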
-func ReadInConfig() error { return v.ReadInConfig() } - -func (v *Viper) ReadInConfig() error { - v.logger.Info("attempting to read in config file") - filename, err := v.getConfigFile() - if err != nil { - return err - } - - if !stringInSlice(v.getConfigType(), SupportedExts) { - return UnsupportedConfigError(v.getConfigType()) - } - - v.logger.Debug("reading file", "file", filename) - file, err := afero.ReadFile(v.fs, filename) - if err != nil { - return err - } - - config := make(map[string]any) - - err = v.unmarshalReader(bytes.NewReader(file), config) - if err != nil { - return err - } - - v.config = config - return nil -} - -// MergeInConfig merges a new configuration with an existing config. -func MergeInConfig() error { return v.MergeInConfig() } - -func (v *Viper) MergeInConfig() error { - v.logger.Info("attempting to merge in config file") - filename, err := v.getConfigFile() - if err != nil { - return err - } - - if !stringInSlice(v.getConfigType(), SupportedExts) { - return UnsupportedConfigError(v.getConfigType()) - } - - file, err := afero.ReadFile(v.fs, filename) - if err != nil { - return err - } - - return v.MergeConfig(bytes.NewReader(file)) -} - -// ReadConfig will read a configuration file, setting existing keys to nil if the -// key does not exist in the file. -func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } - -func (v *Viper) ReadConfig(in io.Reader) error { - v.config = make(map[string]any) - return v.unmarshalReader(in, v.config) -} - -// MergeConfig merges a new configuration with an existing config. -func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } - -func (v *Viper) MergeConfig(in io.Reader) error { - cfg := make(map[string]any) - if err := v.unmarshalReader(in, cfg); err != nil { - return err - } - return v.MergeConfigMap(cfg) -} - -// MergeConfigMap merges the configuration from the map given with an existing config. -// Note that the map given may be modified. -func MergeConfigMap(cfg map[string]any) error { return v.MergeConfigMap(cfg) } - -func (v *Viper) MergeConfigMap(cfg map[string]any) error { - if v.config == nil { - v.config = make(map[string]any) - } - insensitiviseMap(cfg) - mergeMaps(cfg, v.config, nil) - return nil -} - -// WriteConfig writes the current configuration to a file. -func WriteConfig() error { return v.WriteConfig() } - -func (v *Viper) WriteConfig() error { - filename, err := v.getConfigFile() - if err != nil { - return err - } - return v.writeConfig(filename, true) -} - -// SafeWriteConfig writes current configuration to file only if the file does not exist. -func SafeWriteConfig() error { return v.SafeWriteConfig() } - -func (v *Viper) SafeWriteConfig() error { - if len(v.configPaths) < 1 { - return errors.New("missing configuration for 'configPath'") - } - return v.SafeWriteConfigAs(filepath.Join(v.configPaths[0], v.configName+"."+v.configType)) -} - -// WriteConfigAs writes current configuration to a given filename. -func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) } - -func (v *Viper) WriteConfigAs(filename string) error { - return v.writeConfig(filename, true) -} - -// SafeWriteConfigAs writes current configuration to a given filename if it does not exist. 
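A sketch of the usual `ReadInConfig` call site, distinguishing the `ConfigFileNotFoundError` defined earlier from genuine read failures (search paths hypothetical):

```go
package example

import (
	"errors"
	"log"

	"github.com/spf13/viper"
)

func loadConfig() {
	viper.SetConfigName("config") // file name without extension
	viper.SetConfigType("yaml")
	viper.AddConfigPath("/etc/appname/") // hypothetical search paths
	viper.AddConfigPath("$HOME/.appname")

	if err := viper.ReadInConfig(); err != nil {
		var notFound viper.ConfigFileNotFoundError
		if errors.As(err, &notFound) {
			return // no file found; continue on defaults
		}
		log.Fatal(err) // a file was found but could not be read or parsed
	}
}
```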
-func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } - -func (v *Viper) SafeWriteConfigAs(filename string) error { - alreadyExists, err := afero.Exists(v.fs, filename) - if alreadyExists && err == nil { - return ConfigFileAlreadyExistsError(filename) - } - return v.writeConfig(filename, false) -} - -func (v *Viper) writeConfig(filename string, force bool) error { - v.logger.Info("attempting to write configuration to file") - - var configType string - - ext := filepath.Ext(filename) - if ext != "" && ext != filepath.Base(filename) { - configType = ext[1:] - } else { - configType = v.configType - } - if configType == "" { - return fmt.Errorf("config type could not be determined for %s", filename) - } - - if !stringInSlice(configType, SupportedExts) { - return UnsupportedConfigError(configType) - } - if v.config == nil { - v.config = make(map[string]any) - } - flags := os.O_CREATE | os.O_TRUNC | os.O_WRONLY - if !force { - flags |= os.O_EXCL - } - f, err := v.fs.OpenFile(filename, flags, v.configPermissions) - if err != nil { - return err - } - defer f.Close() - - if err := v.marshalWriter(f, configType); err != nil { - return err - } - - return f.Sync() -} - -// Unmarshal a Reader into a map. -// Should probably be an unexported function. -func unmarshalReader(in io.Reader, c map[string]any) error { - return v.unmarshalReader(in, c) -} - -func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { - buf := new(bytes.Buffer) - buf.ReadFrom(in) - - switch format := strings.ToLower(v.getConfigType()); format { - case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "properties", "props", "prop", "dotenv", "env": - err := v.decoderRegistry.Decode(format, buf.Bytes(), c) - if err != nil { - return ConfigParseError{err} - } - } - - insensitiviseMap(c) - return nil -} - -// Marshal a map into Writer. -func (v *Viper) marshalWriter(f afero.File, configType string) error { - c := v.AllSettings() - switch configType { - case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "prop", "props", "properties", "dotenv", "env": - b, err := v.encoderRegistry.Encode(configType, c) - if err != nil { - return ConfigMarshalError{err} - } - - _, err = f.WriteString(string(b)) - if err != nil { - return ConfigMarshalError{err} - } - } - return nil -} - -func keyExists(k string, m map[string]any) string { - lk := strings.ToLower(k) - for mk := range m { - lmk := strings.ToLower(mk) - if lmk == lk { - return mk - } - } - return "" -} - -func castToMapStringInterface( - src map[any]any, -) map[string]any { - tgt := map[string]any{} - for k, v := range src { - tgt[fmt.Sprintf("%v", k)] = v - } - return tgt -} - -func castMapStringSliceToMapInterface(src map[string][]string) map[string]any { - tgt := map[string]any{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - -func castMapStringToMapInterface(src map[string]string) map[string]any { - tgt := map[string]any{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - -func castMapFlagToMapInterface(src map[string]FlagValue) map[string]any { - tgt := map[string]any{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - -// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's -// insistence on parsing nested structures as `map[any]any` -// instead of using a `string` as the key for nest structures beyond one level -// deep. Both map types are supported as there is a go-yaml fork that uses -// `map[string]any` instead. 
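To see the merge semantics implemented by `mergeMaps` below from the caller's side, a sketch layering two YAML documents (contents illustrative):

```go
package example

import (
	"bytes"

	"github.com/spf13/viper"
)

func layered() (int, error) {
	base := []byte("hello:\n  pop: 37890\n")
	override := []byte("hello:\n  pop: 45000\n")

	viper.SetConfigType("yaml")
	if err := viper.ReadConfig(bytes.NewReader(base)); err != nil {
		return 0, err
	}
	// MergeConfig layers the new document over the existing one.
	if err := viper.MergeConfig(bytes.NewReader(override)); err != nil {
		return 0, err
	}
	return viper.GetInt("hello.pop"), nil // 45000
}
```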
-func mergeMaps(src, tgt map[string]any, itgt map[any]any) { - for sk, sv := range src { - tk := keyExists(sk, tgt) - if tk == "" { - v.logger.Debug("", "tk", "\"\"", fmt.Sprintf("tgt[%s]", sk), sv) - tgt[sk] = sv - if itgt != nil { - itgt[sk] = sv - } - continue - } - - tv, ok := tgt[tk] - if !ok { - v.logger.Debug("", fmt.Sprintf("ok[%s]", tk), false, fmt.Sprintf("tgt[%s]", sk), sv) - tgt[sk] = sv - if itgt != nil { - itgt[sk] = sv - } - continue - } - - svType := reflect.TypeOf(sv) - tvType := reflect.TypeOf(tv) - - v.logger.Debug( - "processing", - "key", sk, - "st", svType, - "tt", tvType, - "sv", sv, - "tv", tv, - ) - - switch ttv := tv.(type) { - case map[any]any: - v.logger.Debug("merging maps (must convert)") - tsv, ok := sv.(map[any]any) - if !ok { - v.logger.Error( - "Could not cast sv to map[any]any", - "key", sk, - "st", svType, - "tt", tvType, - "sv", sv, - "tv", tv, - ) - continue - } - - ssv := castToMapStringInterface(tsv) - stv := castToMapStringInterface(ttv) - mergeMaps(ssv, stv, ttv) - case map[string]any: - v.logger.Debug("merging maps") - tsv, ok := sv.(map[string]any) - if !ok { - v.logger.Error( - "Could not cast sv to map[string]any", - "key", sk, - "st", svType, - "tt", tvType, - "sv", sv, - "tv", tv, - ) - continue - } - mergeMaps(tsv, ttv, nil) - default: - v.logger.Debug("setting value") - tgt[tk] = sv - if itgt != nil { - itgt[tk] = sv - } - } - } -} - -// ReadRemoteConfig attempts to get configuration from a remote source -// and read it in the remote configuration registry. -func ReadRemoteConfig() error { return v.ReadRemoteConfig() } - -func (v *Viper) ReadRemoteConfig() error { - return v.getKeyValueConfig() -} - -func WatchRemoteConfig() error { return v.WatchRemoteConfig() } -func (v *Viper) WatchRemoteConfig() error { - return v.watchKeyValueConfig() -} - -func (v *Viper) WatchRemoteConfigOnChannel() error { - return v.watchKeyValueConfigOnChannel() -} - -// Retrieve the first found remote configuration. -func (v *Viper) getKeyValueConfig() error { - if RemoteConfig == nil { - return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") - } - - if len(v.remoteProviders) == 0 { - return RemoteConfigError("No Remote Providers") - } - - for _, rp := range v.remoteProviders { - val, err := v.getRemoteConfig(rp) - if err != nil { - v.logger.Error(fmt.Errorf("get remote config: %w", err).Error()) - - continue - } - - v.kvstore = val - - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]any, error) { - reader, err := RemoteConfig.Get(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - -// Retrieve the first found remote configuration. -func (v *Viper) watchKeyValueConfigOnChannel() error { - if len(v.remoteProviders) == 0 { - return RemoteConfigError("No Remote Providers") - } - - for _, rp := range v.remoteProviders { - respc, _ := RemoteConfig.WatchChannel(rp) - // Todo: Add quit channel - go func(rc <-chan *RemoteResponse) { - for { - b := <-rc - reader := bytes.NewReader(b.Value) - v.unmarshalReader(reader, v.kvstore) - } - }(respc) - return nil - } - return RemoteConfigError("No Files Found") -} - -// Retrieve the first found remote configuration. 
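`WatchRemoteConfig` above performs a single fetch into the key/value store, so callers typically poll; a hedged sketch (endpoint, interval, and key path are placeholders):

```go
package example

import (
	"log"
	"time"

	"github.com/spf13/viper"
	_ "github.com/spf13/viper/remote"
)

func pollRemote() *viper.Viper {
	rv := viper.New()
	_ = rv.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/myapp.json")
	rv.SetConfigType("json")

	go func() {
		for {
			time.Sleep(5 * time.Second) // crude poll interval
			if err := rv.WatchRemoteConfig(); err != nil {
				log.Printf("unable to read remote config: %v", err)
				continue
			}
			// rv's key/value store now holds the latest remote values.
		}
	}()
	return rv
}
```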
-func (v *Viper) watchKeyValueConfig() error { - if len(v.remoteProviders) == 0 { - return RemoteConfigError("No Remote Providers") - } - - for _, rp := range v.remoteProviders { - val, err := v.watchRemoteConfig(rp) - if err != nil { - v.logger.Error(fmt.Errorf("watch remote config: %w", err).Error()) - - continue - } - v.kvstore = val - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]any, error) { - reader, err := RemoteConfig.Watch(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - -// AllKeys returns all keys holding a value, regardless of where they are set. -// Nested keys are returned with a v.keyDelim separator. -func AllKeys() []string { return v.AllKeys() } - -func (v *Viper) AllKeys() []string { - m := map[string]bool{} - // add all paths, by order of descending priority to ensure correct shadowing - m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "") - m = v.flattenAndMergeMap(m, v.override, "") - m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags)) - m = v.mergeFlatMap(m, castMapStringSliceToMapInterface(v.env)) - m = v.flattenAndMergeMap(m, v.config, "") - m = v.flattenAndMergeMap(m, v.kvstore, "") - m = v.flattenAndMergeMap(m, v.defaults, "") - - // convert set of paths to list - a := make([]string, 0, len(m)) - for x := range m { - a = append(a, x) - } - return a -} - -// flattenAndMergeMap recursively flattens the given map into a map[string]bool -// of key paths (used as a set, easier to manipulate than a []string): -// - each path is merged into a single key string, delimited with v.keyDelim -// - if a path is shadowed by an earlier value in the initial shadow map, -// it is skipped. -// -// The resulting set of paths is merged to the given shadow set at the same time. -func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]any, prefix string) map[string]bool { - if shadow != nil && prefix != "" && shadow[prefix] { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]bool) - } - - var m2 map[string]any - if prefix != "" { - prefix += v.keyDelim - } - for k, val := range m { - fullKey := prefix + k - switch val := val.(type) { - case map[string]any: - m2 = val - case map[any]any: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = true - continue - } - // recursively merge to shadow map - shadow = v.flattenAndMergeMap(shadow, m2, fullKey) - } - return shadow -} - -// mergeFlatMap merges the given maps, excluding values of the second map -// shadowed by values from the first map. -func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]any) map[string]bool { - // scan keys -outer: - for k := range m { - path := strings.Split(k, v.keyDelim) - // scan intermediate paths - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if shadow[parentKey] { - // path is shadowed, continue - continue outer - } - } - // add key - shadow[strings.ToLower(k)] = true - } - return shadow -} - -// AllSettings merges all settings and returns them as a map[string]any. 
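`AllKeys` above flattens every register into delimited key paths, and `AllSettings` (next) re-nests them into one map, respecting the shadowing rules; a small sketch:

```go
package example

import (
	"fmt"

	"github.com/spf13/viper"
)

func dump() {
	viper.SetDefault("server.port", 8080)
	viper.Set("server.host", "localhost")

	fmt.Println(viper.AllKeys())     // e.g. [server.port server.host] (order not guaranteed)
	fmt.Println(viper.AllSettings()) // map[server:map[host:localhost port:8080]]
}
```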
-func AllSettings() map[string]any { return v.AllSettings() } - -func (v *Viper) AllSettings() map[string]any { - return v.getSettings(v.AllKeys()) -} - -func (v *Viper) getSettings(keys []string) map[string]any { - m := map[string]any{} - // start from the list of keys, and construct the map one value at a time - for _, k := range keys { - value := v.Get(k) - if value == nil { - // should not happen, since AllKeys() returns only keys holding a value, - // check just in case anything changes - continue - } - path := strings.Split(k, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(m, path[0:len(path)-1]) - // set innermost value - deepestMap[lastKey] = value - } - return m -} - -// SetFs sets the filesystem to use to read configuration. -func SetFs(fs afero.Fs) { v.SetFs(fs) } - -func (v *Viper) SetFs(fs afero.Fs) { - v.fs = fs -} - -// SetConfigName sets name for the config file. -// Does not include extension. -func SetConfigName(in string) { v.SetConfigName(in) } - -func (v *Viper) SetConfigName(in string) { - if in != "" { - v.configName = in - v.configFile = "" - } -} - -// SetConfigType sets the type of the configuration returned by the -// remote source, e.g. "json". -func SetConfigType(in string) { v.SetConfigType(in) } - -func (v *Viper) SetConfigType(in string) { - if in != "" { - v.configType = in - } -} - -// SetConfigPermissions sets the permissions for the config file. -func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) } - -func (v *Viper) SetConfigPermissions(perm os.FileMode) { - v.configPermissions = perm.Perm() -} - -// IniLoadOptions sets the load options for ini parsing. -func IniLoadOptions(in ini.LoadOptions) Option { - return optionFunc(func(v *Viper) { - v.iniLoadOptions = in - }) -} - -func (v *Viper) getConfigType() string { - if v.configType != "" { - return v.configType - } - - cf, err := v.getConfigFile() - if err != nil { - return "" - } - - ext := filepath.Ext(cf) - - if len(ext) > 1 { - return ext[1:] - } - - return "" -} - -func (v *Viper) getConfigFile() (string, error) { - if v.configFile == "" { - cf, err := v.findConfigFile() - if err != nil { - return "", err - } - v.configFile = cf - } - return v.configFile, nil -} - -// Debug prints all configuration registries for debugging -// purposes. 
-func Debug() { v.Debug() }
-func DebugTo(w io.Writer) { v.DebugTo(w) }
-
-func (v *Viper) Debug() { v.DebugTo(os.Stdout) }
-
-func (v *Viper) DebugTo(w io.Writer) {
-	fmt.Fprintf(w, "Aliases:\n%#v\n", v.aliases)
-	fmt.Fprintf(w, "Override:\n%#v\n", v.override)
-	fmt.Fprintf(w, "PFlags:\n%#v\n", v.pflags)
-	fmt.Fprintf(w, "Env:\n%#v\n", v.env)
-	fmt.Fprintf(w, "Key/Value Store:\n%#v\n", v.kvstore)
-	fmt.Fprintf(w, "Config:\n%#v\n", v.config)
-	fmt.Fprintf(w, "Defaults:\n%#v\n", v.defaults)
-}
diff --git a/vendor/github.com/subosito/gotenv/.env b/vendor/github.com/subosito/gotenv/.env
deleted file mode 100644
index 6405eca71f..0000000000
--- a/vendor/github.com/subosito/gotenv/.env
+++ /dev/null
@@ -1 +0,0 @@
-HELLO=world
diff --git a/vendor/github.com/subosito/gotenv/.env.invalid b/vendor/github.com/subosito/gotenv/.env.invalid
deleted file mode 100644
index 016d5e0cea..0000000000
--- a/vendor/github.com/subosito/gotenv/.env.invalid
+++ /dev/null
@@ -1 +0,0 @@
-lol$wut
diff --git a/vendor/github.com/subosito/gotenv/.gitignore b/vendor/github.com/subosito/gotenv/.gitignore
deleted file mode 100644
index 7db37c1db4..0000000000
--- a/vendor/github.com/subosito/gotenv/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-*.test
-*.out
-annotate.json
-profile.cov
diff --git a/vendor/github.com/subosito/gotenv/.golangci.yaml b/vendor/github.com/subosito/gotenv/.golangci.yaml
deleted file mode 100644
index 8c82a762e2..0000000000
--- a/vendor/github.com/subosito/gotenv/.golangci.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-# Options for analysis running.
-run:
-  timeout: 1m
-
-linters-settings:
-  gofmt:
-    simplify: true
diff --git a/vendor/github.com/subosito/gotenv/CHANGELOG.md b/vendor/github.com/subosito/gotenv/CHANGELOG.md
deleted file mode 100644
index c4fe7d3268..0000000000
--- a/vendor/github.com/subosito/gotenv/CHANGELOG.md
+++ /dev/null
@@ -1,105 +0,0 @@
-# Changelog
-
-## [1.5.0] - 2023-08-15
-
-### Added
-
-- Support for reading UTF16 files
-
-### Fixed
-
-- Use io.Reader instead of custom Reader
-- Scanner error handling
-- Reader error handling
-
-## [1.4.2] - 2023-01-11
-
-### Fixed
-
-- Env var initialization
-
-### Changed
-
-- More consistent line splitting
-
-## [1.4.1] - 2022-08-23
-
-### Fixed
-
-- Missing file close
-
-### Changed
-
-- Updated dependencies
-
-## [1.4.0] - 2022-06-02
-
-### Added
-
-- Add `Marshal` and `Unmarshal` helpers
-
-### Changed
-
-- The CI will now run a linter and the tests on PRs.
-
-## [1.3.0] - 2022-05-23
-
-### Added
-
-- Support `=` within double-quoted strings
-- Add support for multiline values
-
-### Changed
-
-- `OverLoad` prefers environment variables over local variables
-
-## [1.2.0] - 2019-08-03
-
-### Added
-
-- Add `Must` helper to raise an error as a panic. It can be used with `Load` and `OverLoad`.
-- Add more tests to reach 100% coverage.
-- Add CHANGELOG
-- Add more OSes to the test matrix: OSX and Windows
-
-### Changed
-
-- Reduce complexity and improve the source code to earn an `A+` score on [goreportcard](https://goreportcard.com/report/github.com/subosito/gotenv).
-- Updated README with mentions of all available functions
-
-### Removed
-
-- Remove `ErrFormat`
-- Remove `MustLoad` and `MustOverload`, replaced with the `Must` helper.
-
-## [1.1.1] - 2018-06-05
-
-### Changed
-
-- Replace `os.Getenv` with `os.LookupEnv` to check whether an environment variable is already set, by [radding](https://github.com/radding)
-
-## [1.1.0] - 2017-03-20
-
-### Added
-
-- Supports carriage return in env
-- Handle files with UTF-8 BOM
-
-### Changed
-
-- Whitespace handling
-
-### Fixed
-
-- Incorrect variable expansion
-- Handling escaped '$' characters
-
-## [1.0.0] - 2014-10-05
-
-First stable release.
-
diff --git a/vendor/github.com/subosito/gotenv/LICENSE b/vendor/github.com/subosito/gotenv/LICENSE
deleted file mode 100644
index f64ccaedc3..0000000000
--- a/vendor/github.com/subosito/gotenv/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Alif Rachmawadi
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/subosito/gotenv/README.md b/vendor/github.com/subosito/gotenv/README.md
deleted file mode 100644
index fc9616e3b0..0000000000
--- a/vendor/github.com/subosito/gotenv/README.md
+++ /dev/null
@@ -1,129 +0,0 @@
-# gotenv
-
-[![Build Status](https://github.com/subosito/gotenv/workflows/Go%20workflow/badge.svg)](https://github.com/subosito/gotenv/actions)
-[![Coverage Status](https://badgen.net/codecov/c/github/subosito/gotenv)](https://codecov.io/gh/subosito/gotenv)
-[![Go Report Card](https://goreportcard.com/badge/github.com/subosito/gotenv)](https://goreportcard.com/report/github.com/subosito/gotenv)
-[![GoDoc](https://godoc.org/github.com/subosito/gotenv?status.svg)](https://godoc.org/github.com/subosito/gotenv)
-
-Load environment variables from `.env` or `io.Reader` in Go.
-
-## Usage
-
-Add the gotenv package to your `import` statement:
-
-```go
-import "github.com/subosito/gotenv"
-```
-
-To modify your app's environment variables, `gotenv` exposes two main functions:
-
-- `gotenv.Load`
-- `gotenv.Apply`
-
-By default, `gotenv.Load` will look for a file called `.env` in the current working directory.
-
-Behind the scenes, it loads the `.env` file and exports the valid variables as environment variables. Make sure you call it as early as possible, say in an `init()` function, so that all variables are loaded.
-
-Once loaded, you can use `os.Getenv()` to get the value of a variable.
-
-Let's say you have a `.env` file:
-
-```sh
-APP_ID=1234567
-APP_SECRET=abcdef
-```
-
-Here's an example app:
-
-```go
-package main
-
-import (
-  "github.com/subosito/gotenv"
-  "log"
-  "os"
-)
-
-func init() {
-  gotenv.Load()
-}
-
-func main() {
-  log.Println(os.Getenv("APP_ID"))     // "1234567"
-  log.Println(os.Getenv("APP_SECRET")) // "abcdef"
-}
-```
-
-You can also load files other than `.env` if you wish. Just supply the filenames when calling `Load()`; they are loaded in order, and the first value set for a variable wins:
-
-```go
-gotenv.Load(".env.production", "credentials")
-```
-
-While `gotenv.Load` loads entries from a `.env` file, `gotenv.Apply` allows you to use any `io.Reader`:
-
-```go
-gotenv.Apply(strings.NewReader("APP_ID=1234567"))
-
-log.Println(os.Getenv("APP_ID"))
-// Output: "1234567"
-```
-
-Both `gotenv.Load` and `gotenv.Apply` **DO NOT** override existing environment variables. If you want to override existing ones, see the section below.
-
-### Environment Overrides
-
-Besides the functions above, `gotenv` also provides counterpart functions that override existing variables:
-
-- `gotenv.OverLoad`
-- `gotenv.OverApply`
-
-Here's an example of this override behavior:
-
-```go
-os.Setenv("HELLO", "world")
-
-// NOTE: with Apply the existing value is preserved
-gotenv.Apply(strings.NewReader("HELLO=universe"))
-fmt.Println(os.Getenv("HELLO"))
-// Output: "world"
-
-// NOTE: with OverApply the existing value is overridden
-gotenv.OverApply(strings.NewReader("HELLO=universe"))
-fmt.Println(os.Getenv("HELLO"))
-// Output: "universe"
-```
-
-### Throw a Panic
-
-Both `gotenv.Load` and `gotenv.OverLoad` return an error when something goes wrong, for example when the env file does not exist. To make error handling easier, `gotenv` also provides the `gotenv.Must` helper, which panics when an error is returned.
-
-```go
-err := gotenv.Load(".env-is-not-exist")
-fmt.Println("error", err)
-// error: open .env-is-not-exist: no such file or directory
-
-gotenv.Must(gotenv.Load, ".env-is-not-exist")
-// it will throw a panic
-// panic: open .env-is-not-exist: no such file or directory
-```
-
-### Another Scenario
-
-Just in case you want to parse environment variables from any `io.Reader`, gotenv keeps its `Parse` and `StrictParse` functions as public API so you can use them.
-
-```go
-// import "strings"
-
-pairs := gotenv.Parse(strings.NewReader("FOO=test\nBAR=$FOO"))
-// gotenv.Env{"FOO": "test", "BAR": "test"}
-
-pairs, err := gotenv.StrictParse(strings.NewReader(`FOO="bar"`))
-// gotenv.Env{"FOO": "bar"}
-```
-
-`Parse` ignores invalid lines and returns an `Env` of valid environment variables, while `StrictParse` returns an error for invalid lines.
-
-## Notes
-
-The gotenv package is a Go port of the [`dotenv`](https://github.com/bkeepers/dotenv) project with some additions made for Go. For general features, it aims to be as compatible as possible.
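Since the README above shows `Parse` and `StrictParse` only as fragments, here is a minimal, self-contained sketch of the same behavior. It assumes only the public API visible in the deleted sources (`gotenv.Parse`, `gotenv.StrictParse`, and the `Env` map) and that `FOO` is not already set in the process environment, since expansion prefers OS-level values:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/subosito/gotenv"
)

func main() {
	// Parse expands $FOO from the pairs parsed so far and discards any
	// parse error, returning whatever was collected up to that point.
	pairs := gotenv.Parse(strings.NewReader("FOO=test\nBAR=$FOO"))
	fmt.Println(pairs["FOO"], pairs["BAR"]) // test test

	// StrictParse surfaces a malformed line as an error instead.
	if _, err := gotenv.StrictParse(strings.NewReader("lol$wut")); err != nil {
		fmt.Println("strict:", err) // line `lol$wut` doesn't match format
	}
}
```

Both calls exercise code paths in the `gotenv.go` file deleted just below: `strictParse` for scanning and `checkFormat` for the malformed-line error.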
diff --git a/vendor/github.com/subosito/gotenv/gotenv.go b/vendor/github.com/subosito/gotenv/gotenv.go
deleted file mode 100644
index 1191d35874..0000000000
--- a/vendor/github.com/subosito/gotenv/gotenv.go
+++ /dev/null
@@ -1,409 +0,0 @@
-// Package gotenv provides functionality to dynamically load environment variables.
-package gotenv
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-
-	"golang.org/x/text/encoding/unicode"
-	"golang.org/x/text/transform"
-)
-
-const (
-	// Pattern for detecting a valid line format
-	linePattern = `\A\s*(?:export\s+)?([\w\.]+)(?:\s*=\s*|:\s+?)('(?:\'|[^'])*'|"(?:\"|[^"])*"|[^#\n]+)?\s*(?:\s*\#.*)?\z`
-
-	// Pattern for detecting a valid variable within a value
-	variablePattern = `(\\)?(\$)(\{?([A-Z0-9_]+)?\}?)`
-)
-
-// Byte order mark sequences
-var (
-	bomUTF8    = []byte("\xEF\xBB\xBF")
-	bomUTF16LE = []byte("\xFF\xFE")
-	bomUTF16BE = []byte("\xFE\xFF")
-)
-
-// Env holds key/value pairs of valid environment variables.
-type Env map[string]string
-
-// Load loads a file or multiple files and exports the valid variables as environment variables if they are not already set.
-// When called with no arguments, it loads the `.env` file in the current path and sets the environment variables.
-// Otherwise, it loops over the filenames parameter and sets the proper environment variables.
-func Load(filenames ...string) error {
-	return loadenv(false, filenames...)
-}
-
-// OverLoad loads a file or multiple files and exports and overrides the valid variables as environment variables.
-func OverLoad(filenames ...string) error {
-	return loadenv(true, filenames...)
-}
-
-// Must is a wrapper function that panics when the supplied function returns an error.
-func Must(fn func(filenames ...string) error, filenames ...string) {
-	if err := fn(filenames...); err != nil {
-		panic(err.Error())
-	}
-}
-
-// Apply loads an io.Reader and exports the valid variables as environment variables if they are not already set.
-func Apply(r io.Reader) error {
-	return parset(r, false)
-}
-
-// OverApply loads an io.Reader and exports and overrides the valid variables as environment variables.
-func OverApply(r io.Reader) error {
-	return parset(r, true)
-}
-
-func loadenv(override bool, filenames ...string) error {
-	if len(filenames) == 0 {
-		filenames = []string{".env"}
-	}
-
-	for _, filename := range filenames {
-		f, err := os.Open(filename)
-		if err != nil {
-			return err
-		}
-
-		err = parset(f, override)
-		f.Close()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// parse and set :)
-func parset(r io.Reader, override bool) error {
-	env, err := strictParse(r, override)
-	if err != nil {
-		return err
-	}
-
-	for key, val := range env {
-		setenv(key, val, override)
-	}
-
-	return nil
-}
-
-func setenv(key, val string, override bool) {
-	if override {
-		os.Setenv(key, val)
-	} else {
-		if _, present := os.LookupEnv(key); !present {
-			os.Setenv(key, val)
-		}
-	}
-}
-
-// Parse parses any supplied io.Reader line by line and returns the valid Env key/value pairs.
-// It expands variable references from the environment but does not set any values in the environment itself.
-// This function skips any invalid lines and processes only the valid ones.
-func Parse(r io.Reader) Env {
-	env, _ := strictParse(r, false)
-	return env
-}
-
-// StrictParse parses any supplied io.Reader line by line and returns the valid Env key/value pairs.
-// It expands variable references from the environment but does not set any values in the environment itself.
-// This function returns an error if there are any invalid lines.
-func StrictParse(r io.Reader) (Env, error) {
-	return strictParse(r, false)
-}
-
-// Read parses a file line by line and returns the valid Env key/value pairs.
-// It expands variable references from the environment but does not set any values in the environment itself.
-// This function skips any invalid lines and processes only the valid ones.
-func Read(filename string) (Env, error) {
-	f, err := os.Open(filename)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	return strictParse(f, false)
-}
-
-// Unmarshal reads a string line by line and returns the valid Env key/value pairs.
-// It expands variable references from the environment but does not set any values in the environment itself.
-// This function returns an error if there are any invalid lines.
-func Unmarshal(str string) (Env, error) {
-	return strictParse(strings.NewReader(str), false)
-}
-
-// Marshal outputs the given environment as an env file.
-// Variables will be sorted by name.
-func Marshal(env Env) (string, error) {
-	lines := make([]string, 0, len(env))
-	for k, v := range env {
-		if d, err := strconv.Atoi(v); err == nil {
-			lines = append(lines, fmt.Sprintf(`%s=%d`, k, d))
-		} else {
-			lines = append(lines, fmt.Sprintf(`%s=%q`, k, v))
-		}
-	}
-	sort.Strings(lines)
-	return strings.Join(lines, "\n"), nil
-}
-
-// Write serializes the given environment and writes it to a file.
-func Write(env Env, filename string) error {
-	content, err := Marshal(env)
-	if err != nil {
-		return err
-	}
-	// ensure the path exists
-	if err := os.MkdirAll(filepath.Dir(filename), 0o775); err != nil {
-		return err
-	}
-	// create or truncate the file
-	file, err := os.Create(filename)
-	if err != nil {
-		return err
-	}
-	defer file.Close()
-	_, err = file.WriteString(content + "\n")
-	if err != nil {
-		return err
-	}
-
-	return file.Sync()
-}
-
-// splitLines is a valid SplitFunc for a bufio.Scanner. It will split lines on CR ('\r'), LF ('\n') or CRLF (any of the three sequences).
-// If a CR is immediately followed by a LF, it is treated as a CRLF (one single line break).
-func splitLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
-	if atEOF && len(data) == 0 {
-		return 0, nil, bufio.ErrFinalToken
-	}
-
-	idx := bytes.IndexAny(data, "\r\n")
-	switch {
-	case atEOF && idx < 0:
-		return len(data), data, bufio.ErrFinalToken
-
-	case idx < 0:
-		return 0, nil, nil
-	}
-
-	// consume CR or LF
-	eol := idx + 1
-	// detect CRLF
-	if len(data) > eol && data[eol-1] == '\r' && data[eol] == '\n' {
-		eol++
-	}
-
-	return eol, data[:idx], nil
-}
-
-func strictParse(r io.Reader, override bool) (Env, error) {
-	env := make(Env)
-
-	buf := new(bytes.Buffer)
-	tee := io.TeeReader(r, buf)
-
-	// There can be a maximum of 3 BOM bytes.
-	bomByteBuffer := make([]byte, 3)
-	_, err := tee.Read(bomByteBuffer)
-	if err != nil && err != io.EOF {
-		return env, err
-	}
-
-	z := io.MultiReader(buf, r)
-
-	// We choose a different scanner depending on the file encoding.
-	var scanner *bufio.Scanner
-
-	if bytes.HasPrefix(bomByteBuffer, bomUTF8) {
-		scanner = bufio.NewScanner(transform.NewReader(z, unicode.UTF8BOM.NewDecoder()))
-	} else if bytes.HasPrefix(bomByteBuffer, bomUTF16LE) {
-		scanner = bufio.NewScanner(transform.NewReader(z, unicode.UTF16(unicode.LittleEndian, unicode.ExpectBOM).NewDecoder()))
-	} else if bytes.HasPrefix(bomByteBuffer, bomUTF16BE) {
-		scanner = bufio.NewScanner(transform.NewReader(z, unicode.UTF16(unicode.BigEndian, unicode.ExpectBOM).NewDecoder()))
-	} else {
-		scanner = bufio.NewScanner(z)
-	}
-
-	scanner.Split(splitLines)
-
-	for scanner.Scan() {
-		if err := scanner.Err(); err != nil {
-			return env, err
-		}
-
-		line := strings.TrimSpace(scanner.Text())
-		if line == "" || line[0] == '#' {
-			continue
-		}
-
-		quote := ""
-		// look for the delimiter character
-		idx := strings.Index(line, "=")
-		if idx == -1 {
-			idx = strings.Index(line, ":")
-		}
-		// look for a quote character
-		if idx > 0 && idx < len(line)-1 {
-			val := strings.TrimSpace(line[idx+1:])
-			if val[0] == '"' || val[0] == '\'' {
-				quote = val[:1]
-				// look for the closing quote character within the same line
-				idx = strings.LastIndex(strings.TrimSpace(val[1:]), quote)
-				if idx >= 0 && val[idx] != '\\' {
-					quote = ""
-				}
-			}
-		}
-		// look for the closing quote character
-		for quote != "" && scanner.Scan() {
-			l := scanner.Text()
-			line += "\n" + l
-			idx := strings.LastIndex(l, quote)
-			if idx > 0 && l[idx-1] == '\\' {
-				// found a matching quote character but it's escaped
-				continue
-			}
-			if idx >= 0 {
-				// found a matching quote
-				quote = ""
-			}
-		}
-
-		if quote != "" {
-			return env, fmt.Errorf("missing quotes")
-		}
-
-		err := parseLine(line, env, override)
-		if err != nil {
-			return env, err
-		}
-	}
-
-	return env, scanner.Err()
-}
-
-var (
-	lineRgx     = regexp.MustCompile(linePattern)
-	unescapeRgx = regexp.MustCompile(`\\([^$])`)
-	varRgx      = regexp.MustCompile(variablePattern)
-)
-
-func parseLine(s string, env Env, override bool) error {
-	rm := lineRgx.FindStringSubmatch(s)
-
-	if len(rm) == 0 {
-		return checkFormat(s, env)
-	}
-
-	key := strings.TrimSpace(rm[1])
-	val := strings.TrimSpace(rm[2])
-
-	var hsq, hdq bool
-
-	// check if the value is quoted
-	if l := len(val); l >= 2 {
-		l -= 1
-		// has double quotes
-		hdq = val[0] == '"' && val[l] == '"'
-		// has single quotes
-		hsq = val[0] == '\'' && val[l] == '\''
-
-		// remove quotes '' or ""
-		if hsq || hdq {
-			val = val[1:l]
-		}
-	}
-
-	if hdq {
-		val = strings.ReplaceAll(val, `\n`, "\n")
-		val = strings.ReplaceAll(val, `\r`, "\r")
-
-		// Unescape all characters except $ so variables can be escaped properly
-		val = unescapeRgx.ReplaceAllString(val, "$1")
-	}
-
-	if !hsq {
-		fv := func(s string) string {
-			return varReplacement(s, hsq, env, override)
-		}
-		val = varRgx.ReplaceAllStringFunc(val, fv)
-	}
-
-	env[key] = val
-	return nil
-}
-
-func parseExport(st string, env Env) error {
-	if strings.HasPrefix(st, "export") {
-		vs := strings.SplitN(st, " ", 2)
-
-		if len(vs) > 1 {
-			if _, ok := env[vs[1]]; !ok {
-				return fmt.Errorf("line `%s` has an unset variable", st)
-			}
-		}
-	}
-
-	return nil
-}
-
-var varNameRgx = regexp.MustCompile(`(\$)(\{?([A-Z0-9_]+)\}?)`)
-
-func varReplacement(s string, hsq bool, env Env, override bool) string {
-	if s == "" {
-		return s
-	}
-
-	if s[0] == '\\' {
-		// the dollar sign is escaped
-		return s[1:]
-	}
-
-	if hsq {
-		return s
-	}
-
-	mn := varNameRgx.FindStringSubmatch(s)
-
-	if len(mn) == 0 {
-		return s
-	}
-
-	v := mn[3]
-
-	if replace, ok := os.LookupEnv(v); ok && !override {
-		return replace
-	}
-
-	if replace, ok := env[v]; ok {
-		return replace
-	}
-
-	return os.Getenv(v)
-}
-
-func checkFormat(s string, env Env) error {
-	st := strings.TrimSpace(s)
-
-	if st == "" || st[0] == '#' {
-		return nil
-	}
-
-	if err := parseExport(st, env); err != nil {
-		return err
-	}
-
-	return fmt.Errorf("line `%s` doesn't match format", s)
-}
diff --git a/vendor/github.com/titanous/rocacheck/LICENSE b/vendor/github.com/titanous/rocacheck/LICENSE
deleted file mode 100644
index 7bdce481fa..0000000000
--- a/vendor/github.com/titanous/rocacheck/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-MIT License
-
-Copyright (c) 2017, Jonathan Rudenberg
-Copyright (c) 2017, CRoCS, EnigmaBridge Ltd.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/titanous/rocacheck/README.md b/vendor/github.com/titanous/rocacheck/README.md
deleted file mode 100644
index b8e765ea9c..0000000000
--- a/vendor/github.com/titanous/rocacheck/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# rocacheck [![GoDoc](https://godoc.org/github.com/titanous/rocacheck?status.svg)](https://godoc.org/github.com/titanous/rocacheck)
-
-Package rocacheck is a Go implementation of the [key fingerprint
-algorithm](https://github.com/crocs-muni/roca) that checks if an RSA key was
-generated by broken Infineon code and is vulnerable to factorization via the
-[Return of Coppersmith's Attack
-(ROCA)](https://crocs.fi.muni.cz/public/papers/rsa_ccs17) / CVE-2017-15361.
diff --git a/vendor/github.com/titanous/rocacheck/rocacheck.go b/vendor/github.com/titanous/rocacheck/rocacheck.go
deleted file mode 100644
index e813579bb8..0000000000
--- a/vendor/github.com/titanous/rocacheck/rocacheck.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Package rocacheck checks if a key was generated by broken Infineon code and
-// is vulnerable to factorization via the Return of Coppersmith's Attack (ROCA)
-// / CVE-2017-15361.
-package rocacheck - -import ( - "crypto/rsa" - "math/big" -) - -type test struct { - Prime *big.Int - Fingerprints map[int64]struct{} -} - -var tests = make([]test, 17) - -func init() { - bigOne := big.NewInt(1) - n := &big.Int{} - // relations table from https://github.com/crocs-muni/roca/pull/40 - for i, r := range [][2]int64{ - {2, 11}, {6, 13}, {8, 17}, {9, 19}, {3, 37}, {26, 53}, {20, 61}, - {35, 71}, {24, 73}, {13, 79}, {6, 97}, {51, 103}, {53, 107}, - {54, 109}, {42, 127}, {50, 151}, {78, 157}, - } { - fps := make(map[int64]struct{}) - bp := big.NewInt(r[1]) - br := big.NewInt(r[0]) - for j := int64(0); j < r[1]; j++ { - if n.Exp(big.NewInt(j), br, bp).Cmp(bigOne) == 0 { - fps[j] = struct{}{} - } - } - tests[i] = test{ - Prime: big.NewInt(r[1]), - Fingerprints: fps, - } - } -} - -// IsWeak returns true if a RSA public key is vulnerable to Return of -// Coppersmith's Attack (ROCA). -func IsWeak(k *rsa.PublicKey) bool { - tmp := &big.Int{} - for _, t := range tests { - if _, ok := t.Fingerprints[tmp.Mod(k.N, t.Prime).Int64()]; !ok { - return false - } - } - return true -} diff --git a/vendor/github.com/vbatts/tar-split/LICENSE b/vendor/github.com/vbatts/tar-split/LICENSE deleted file mode 100644 index ca03685b15..0000000000 --- a/vendor/github.com/vbatts/tar-split/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this -list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors -may be used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go deleted file mode 100644 index dee9e47e4a..0000000000 --- a/vendor/github.com/vbatts/tar-split/archive/tar/common.go +++ /dev/null @@ -1,723 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tar implements access to tar archives. -// -// Tape archives (tar) are a file format for storing a sequence of files that -// can be read and written in a streaming manner. 
-// This package aims to cover most variations of the format, -// including those produced by GNU and BSD tar tools. -package tar - -import ( - "errors" - "fmt" - "math" - "os" - "path" - "reflect" - "strconv" - "strings" - "time" -) - -// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit -// architectures. If a large value is encountered when decoding, the result -// stored in Header will be the truncated version. - -var ( - ErrHeader = errors.New("archive/tar: invalid tar header") - ErrWriteTooLong = errors.New("archive/tar: write too long") - ErrFieldTooLong = errors.New("archive/tar: header field too long") - ErrWriteAfterClose = errors.New("archive/tar: write after close") - errMissData = errors.New("archive/tar: sparse file references non-existent data") - errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data") - errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole") -) - -type headerError []string - -func (he headerError) Error() string { - const prefix = "archive/tar: cannot encode header" - var ss []string - for _, s := range he { - if s != "" { - ss = append(ss, s) - } - } - if len(ss) == 0 { - return prefix - } - return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and ")) -} - -// Type flags for Header.Typeflag. -const ( - // Type '0' indicates a regular file. - TypeReg = '0' - TypeRegA = '\x00' // Deprecated: Use TypeReg instead. - - // Type '1' to '6' are header-only flags and may not have a data body. - TypeLink = '1' // Hard link - TypeSymlink = '2' // Symbolic link - TypeChar = '3' // Character device node - TypeBlock = '4' // Block device node - TypeDir = '5' // Directory - TypeFifo = '6' // FIFO node - - // Type '7' is reserved. - TypeCont = '7' - - // Type 'x' is used by the PAX format to store key-value records that - // are only relevant to the next file. - // This package transparently handles these types. - TypeXHeader = 'x' - - // Type 'g' is used by the PAX format to store key-value records that - // are relevant to all subsequent files. - // This package only supports parsing and composing such headers, - // but does not currently support persisting the global state across files. - TypeXGlobalHeader = 'g' - - // Type 'S' indicates a sparse file in the GNU format. - TypeGNUSparse = 'S' - - // Types 'L' and 'K' are used by the GNU format for a meta file - // used to store the path or link name for the next file. - // This package transparently handles these types. - TypeGNULongName = 'L' - TypeGNULongLink = 'K' -) - -// Keywords for PAX extended header records. -const ( - paxNone = "" // Indicates that no PAX key is suitable - paxPath = "path" - paxLinkpath = "linkpath" - paxSize = "size" - paxUid = "uid" - paxGid = "gid" - paxUname = "uname" - paxGname = "gname" - paxMtime = "mtime" - paxAtime = "atime" - paxCtime = "ctime" // Removed from later revision of PAX spec, but was valid - paxCharset = "charset" // Currently unused - paxComment = "comment" // Currently unused - - paxSchilyXattr = "SCHILY.xattr." - - // Keywords for GNU sparse files in a PAX extended header. - paxGNUSparse = "GNU.sparse." 
- paxGNUSparseNumBlocks = "GNU.sparse.numblocks" - paxGNUSparseOffset = "GNU.sparse.offset" - paxGNUSparseNumBytes = "GNU.sparse.numbytes" - paxGNUSparseMap = "GNU.sparse.map" - paxGNUSparseName = "GNU.sparse.name" - paxGNUSparseMajor = "GNU.sparse.major" - paxGNUSparseMinor = "GNU.sparse.minor" - paxGNUSparseSize = "GNU.sparse.size" - paxGNUSparseRealSize = "GNU.sparse.realsize" -) - -// basicKeys is a set of the PAX keys for which we have built-in support. -// This does not contain "charset" or "comment", which are both PAX-specific, -// so adding them as first-class features of Header is unlikely. -// Users can use the PAXRecords field to set it themselves. -var basicKeys = map[string]bool{ - paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true, - paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true, -} - -// A Header represents a single header in a tar archive. -// Some fields may not be populated. -// -// For forward compatibility, users that retrieve a Header from Reader.Next, -// mutate it in some ways, and then pass it back to Writer.WriteHeader -// should do so by creating a new Header and copying the fields -// that they are interested in preserving. -type Header struct { - // Typeflag is the type of header entry. - // The zero value is automatically promoted to either TypeReg or TypeDir - // depending on the presence of a trailing slash in Name. - Typeflag byte - - Name string // Name of file entry - Linkname string // Target name of link (valid for TypeLink or TypeSymlink) - - Size int64 // Logical file size in bytes - Mode int64 // Permission and mode bits - Uid int // User ID of owner - Gid int // Group ID of owner - Uname string // User name of owner - Gname string // Group name of owner - - // If the Format is unspecified, then Writer.WriteHeader rounds ModTime - // to the nearest second and ignores the AccessTime and ChangeTime fields. - // - // To use AccessTime or ChangeTime, specify the Format as PAX or GNU. - // To use sub-second resolution, specify the Format as PAX. - ModTime time.Time // Modification time - AccessTime time.Time // Access time (requires either PAX or GNU support) - ChangeTime time.Time // Change time (requires either PAX or GNU support) - - Devmajor int64 // Major device number (valid for TypeChar or TypeBlock) - Devminor int64 // Minor device number (valid for TypeChar or TypeBlock) - - // Xattrs stores extended attributes as PAX records under the - // "SCHILY.xattr." namespace. - // - // The following are semantically equivalent: - // h.Xattrs[key] = value - // h.PAXRecords["SCHILY.xattr."+key] = value - // - // When Writer.WriteHeader is called, the contents of Xattrs will take - // precedence over those in PAXRecords. - // - // Deprecated: Use PAXRecords instead. - Xattrs map[string]string - - // PAXRecords is a map of PAX extended header records. - // - // User-defined records should have keys of the following form: - // VENDOR.keyword - // Where VENDOR is some namespace in all uppercase, and keyword may - // not contain the '=' character (e.g., "GOLANG.pkg.version"). - // The key and value should be non-empty UTF-8 strings. - // - // When Writer.WriteHeader is called, PAX records derived from the - // other fields in Header take precedence over PAXRecords. - PAXRecords map[string]string - - // Format specifies the format of the tar header. - // - // This is set by Reader.Next as a best-effort guess at the format. 
- // Since the Reader liberally reads some non-compliant files, - // it is possible for this to be FormatUnknown. - // - // If the format is unspecified when Writer.WriteHeader is called, - // then it uses the first format (in the order of USTAR, PAX, GNU) - // capable of encoding this Header (see Format). - Format Format -} - -// sparseEntry represents a Length-sized fragment at Offset in the file. -type sparseEntry struct{ Offset, Length int64 } - -func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length } - -// A sparse file can be represented as either a sparseDatas or a sparseHoles. -// As long as the total size is known, they are equivalent and one can be -// converted to the other form and back. The various tar formats with sparse -// file support represent sparse files in the sparseDatas form. That is, they -// specify the fragments in the file that has data, and treat everything else as -// having zero bytes. As such, the encoding and decoding logic in this package -// deals with sparseDatas. -// -// However, the external API uses sparseHoles instead of sparseDatas because the -// zero value of sparseHoles logically represents a normal file (i.e., there are -// no holes in it). On the other hand, the zero value of sparseDatas implies -// that the file has no data in it, which is rather odd. -// -// As an example, if the underlying raw file contains the 10-byte data: -// var compactFile = "abcdefgh" -// -// And the sparse map has the following entries: -// var spd sparseDatas = []sparseEntry{ -// {Offset: 2, Length: 5}, // Data fragment for 2..6 -// {Offset: 18, Length: 3}, // Data fragment for 18..20 -// } -// var sph sparseHoles = []sparseEntry{ -// {Offset: 0, Length: 2}, // Hole fragment for 0..1 -// {Offset: 7, Length: 11}, // Hole fragment for 7..17 -// {Offset: 21, Length: 4}, // Hole fragment for 21..24 -// } -// -// Then the content of the resulting sparse file with a Header.Size of 25 is: -// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 -type ( - sparseDatas []sparseEntry - sparseHoles []sparseEntry -) - -// validateSparseEntries reports whether sp is a valid sparse map. -// It does not matter whether sp represents data fragments or hole fragments. -func validateSparseEntries(sp []sparseEntry, size int64) bool { - // Validate all sparse entries. These are the same checks as performed by - // the BSD tar utility. - if size < 0 { - return false - } - var pre sparseEntry - for _, cur := range sp { - switch { - case cur.Offset < 0 || cur.Length < 0: - return false // Negative values are never okay - case cur.Offset > math.MaxInt64-cur.Length: - return false // Integer overflow with large length - case cur.endOffset() > size: - return false // Region extends beyond the actual size - case pre.endOffset() > cur.Offset: - return false // Regions cannot overlap and must be in order - } - pre = cur - } - return true -} - -// alignSparseEntries mutates src and returns dst where each fragment's -// starting offset is aligned up to the nearest block edge, and each -// ending offset is aligned down to the nearest block edge. -// -// Even though the Go tar Reader and the BSD tar utility can handle entries -// with arbitrary offsets and lengths, the GNU tar utility can only handle -// offsets and lengths that are multiples of blockSize. 
-func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry { - dst := src[:0] - for _, s := range src { - pos, end := s.Offset, s.endOffset() - pos += blockPadding(+pos) // Round-up to nearest blockSize - if end != size { - end -= blockPadding(-end) // Round-down to nearest blockSize - } - if pos < end { - dst = append(dst, sparseEntry{Offset: pos, Length: end - pos}) - } - } - return dst -} - -// invertSparseEntries converts a sparse map from one form to the other. -// If the input is sparseHoles, then it will output sparseDatas and vice-versa. -// The input must have been already validated. -// -// This function mutates src and returns a normalized map where: -// * adjacent fragments are coalesced together -// * only the last fragment may be empty -// * the endOffset of the last fragment is the total size -func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry { - dst := src[:0] - var pre sparseEntry - for _, cur := range src { - if cur.Length == 0 { - continue // Skip empty fragments - } - pre.Length = cur.Offset - pre.Offset - if pre.Length > 0 { - dst = append(dst, pre) // Only add non-empty fragments - } - pre.Offset = cur.endOffset() - } - pre.Length = size - pre.Offset // Possibly the only empty fragment - return append(dst, pre) -} - -// fileState tracks the number of logical (includes sparse holes) and physical -// (actual in tar archive) bytes remaining for the current file. -// -// Invariant: LogicalRemaining >= PhysicalRemaining -type fileState interface { - LogicalRemaining() int64 - PhysicalRemaining() int64 -} - -// allowedFormats determines which formats can be used. -// The value returned is the logical OR of multiple possible formats. -// If the value is FormatUnknown, then the input Header cannot be encoded -// and an error is returned explaining why. -// -// As a by-product of checking the fields, this function returns paxHdrs, which -// contain all fields that could not be directly encoded. -// A value receiver ensures that this method does not mutate the source Header. -func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) { - format = FormatUSTAR | FormatPAX | FormatGNU - paxHdrs = make(map[string]string) - - var whyNoUSTAR, whyNoPAX, whyNoGNU string - var preferPAX bool // Prefer PAX over USTAR - verifyString := func(s string, size int, name, paxKey string) { - // NUL-terminator is optional for path and linkpath. - // Technically, it is required for uname and gname, - // but neither GNU nor BSD tar checks for it. 
- tooLong := len(s) > size - allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath - if hasNUL(s) || (tooLong && !allowLongGNU) { - whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s) - format.mustNotBe(FormatGNU) - } - if !isASCII(s) || tooLong { - canSplitUSTAR := paxKey == paxPath - if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok { - whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s) - format.mustNotBe(FormatUSTAR) - } - if paxKey == paxNone { - whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s) - format.mustNotBe(FormatPAX) - } else { - paxHdrs[paxKey] = s - } - } - if v, ok := h.PAXRecords[paxKey]; ok && v == s { - paxHdrs[paxKey] = v - } - } - verifyNumeric := func(n int64, size int, name, paxKey string) { - if !fitsInBase256(size, n) { - whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n) - format.mustNotBe(FormatGNU) - } - if !fitsInOctal(size, n) { - whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n) - format.mustNotBe(FormatUSTAR) - if paxKey == paxNone { - whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n) - format.mustNotBe(FormatPAX) - } else { - paxHdrs[paxKey] = strconv.FormatInt(n, 10) - } - } - if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) { - paxHdrs[paxKey] = v - } - } - verifyTime := func(ts time.Time, size int, name, paxKey string) { - if ts.IsZero() { - return // Always okay - } - if !fitsInBase256(size, ts.Unix()) { - whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts) - format.mustNotBe(FormatGNU) - } - isMtime := paxKey == paxMtime - fitsOctal := fitsInOctal(size, ts.Unix()) - if (isMtime && !fitsOctal) || !isMtime { - whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts) - format.mustNotBe(FormatUSTAR) - } - needsNano := ts.Nanosecond() != 0 - if !isMtime || !fitsOctal || needsNano { - preferPAX = true // USTAR may truncate sub-second measurements - if paxKey == paxNone { - whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts) - format.mustNotBe(FormatPAX) - } else { - paxHdrs[paxKey] = formatPAXTime(ts) - } - } - if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) { - paxHdrs[paxKey] = v - } - } - - // Check basic fields. - var blk block - v7 := blk.V7() - ustar := blk.USTAR() - gnu := blk.GNU() - verifyString(h.Name, len(v7.Name()), "Name", paxPath) - verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath) - verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname) - verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname) - verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone) - verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid) - verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid) - verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize) - verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone) - verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone) - verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime) - verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime) - verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime) - - // Check for header-only types. - var whyOnlyPAX, whyOnlyGNU string - switch h.Typeflag { - case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse: - // Exclude TypeLink and TypeSymlink, since they may reference directories. 
- if strings.HasSuffix(h.Name, "/") { - return FormatUnknown, nil, headerError{"filename may not have trailing slash"} - } - case TypeXHeader, TypeGNULongName, TypeGNULongLink: - return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"} - case TypeXGlobalHeader: - h2 := Header{Name: h.Name, Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format} - if !reflect.DeepEqual(h, h2) { - return FormatUnknown, nil, headerError{"only PAXRecords should be set for TypeXGlobalHeader"} - } - whyOnlyPAX = "only PAX supports TypeXGlobalHeader" - format.mayOnlyBe(FormatPAX) - } - if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 { - return FormatUnknown, nil, headerError{"negative size on header-only type"} - } - - // Check PAX records. - if len(h.Xattrs) > 0 { - for k, v := range h.Xattrs { - paxHdrs[paxSchilyXattr+k] = v - } - whyOnlyPAX = "only PAX supports Xattrs" - format.mayOnlyBe(FormatPAX) - } - if len(h.PAXRecords) > 0 { - for k, v := range h.PAXRecords { - switch _, exists := paxHdrs[k]; { - case exists: - continue // Do not overwrite existing records - case h.Typeflag == TypeXGlobalHeader: - paxHdrs[k] = v // Copy all records - case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse): - paxHdrs[k] = v // Ignore local records that may conflict - } - } - whyOnlyPAX = "only PAX supports PAXRecords" - format.mayOnlyBe(FormatPAX) - } - for k, v := range paxHdrs { - if !validPAXRecord(k, v) { - return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)} - } - } - - // TODO(dsnet): Re-enable this when adding sparse support. - // See https://golang.org/issue/22735 - /* - // Check sparse files. - if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse { - if isHeaderOnlyType(h.Typeflag) { - return FormatUnknown, nil, headerError{"header-only type cannot be sparse"} - } - if !validateSparseEntries(h.SparseHoles, h.Size) { - return FormatUnknown, nil, headerError{"invalid sparse holes"} - } - if h.Typeflag == TypeGNUSparse { - whyOnlyGNU = "only GNU supports TypeGNUSparse" - format.mayOnlyBe(FormatGNU) - } else { - whyNoGNU = "GNU supports sparse files only with TypeGNUSparse" - format.mustNotBe(FormatGNU) - } - whyNoUSTAR = "USTAR does not support sparse files" - format.mustNotBe(FormatUSTAR) - } - */ - - // Check desired format. - if wantFormat := h.Format; wantFormat != FormatUnknown { - if wantFormat.has(FormatPAX) && !preferPAX { - wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too - } - format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted - } - if format == FormatUnknown { - switch h.Format { - case FormatUSTAR: - err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU} - case FormatPAX: - err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU} - case FormatGNU: - err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX} - default: - err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU} - } - } - return format, paxHdrs, err -} - -// FileInfo returns an os.FileInfo for the Header. -func (h *Header) FileInfo() os.FileInfo { - return headerFileInfo{h} -} - -// headerFileInfo implements os.FileInfo. 
-type headerFileInfo struct { - h *Header -} - -func (fi headerFileInfo) Size() int64 { return fi.h.Size } -func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } -func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } -func (fi headerFileInfo) Sys() interface{} { return fi.h } - -// Name returns the base name of the file. -func (fi headerFileInfo) Name() string { - if fi.IsDir() { - return path.Base(path.Clean(fi.h.Name)) - } - return path.Base(fi.h.Name) -} - -// Mode returns the permission and mode bits for the headerFileInfo. -func (fi headerFileInfo) Mode() (mode os.FileMode) { - // Set file permission bits. - mode = os.FileMode(fi.h.Mode).Perm() - - // Set setuid, setgid and sticky bits. - if fi.h.Mode&c_ISUID != 0 { - mode |= os.ModeSetuid - } - if fi.h.Mode&c_ISGID != 0 { - mode |= os.ModeSetgid - } - if fi.h.Mode&c_ISVTX != 0 { - mode |= os.ModeSticky - } - - // Set file mode bits; clear perm, setuid, setgid, and sticky bits. - switch m := os.FileMode(fi.h.Mode) &^ 07777; m { - case c_ISDIR: - mode |= os.ModeDir - case c_ISFIFO: - mode |= os.ModeNamedPipe - case c_ISLNK: - mode |= os.ModeSymlink - case c_ISBLK: - mode |= os.ModeDevice - case c_ISCHR: - mode |= os.ModeDevice - mode |= os.ModeCharDevice - case c_ISSOCK: - mode |= os.ModeSocket - } - - switch fi.h.Typeflag { - case TypeSymlink: - mode |= os.ModeSymlink - case TypeChar: - mode |= os.ModeDevice - mode |= os.ModeCharDevice - case TypeBlock: - mode |= os.ModeDevice - case TypeDir: - mode |= os.ModeDir - case TypeFifo: - mode |= os.ModeNamedPipe - } - - return mode -} - -// sysStat, if non-nil, populates h from system-dependent fields of fi. -var sysStat func(fi os.FileInfo, h *Header) error - -const ( - // Mode constants from the USTAR spec: - // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 - c_ISUID = 04000 // Set uid - c_ISGID = 02000 // Set gid - c_ISVTX = 01000 // Save text (sticky bit) - - // Common Unix mode constants; these are not defined in any common tar standard. - // Header.FileInfo understands these, but FileInfoHeader will never produce these. - c_ISDIR = 040000 // Directory - c_ISFIFO = 010000 // FIFO - c_ISREG = 0100000 // Regular file - c_ISLNK = 0120000 // Symbolic link - c_ISBLK = 060000 // Block special file - c_ISCHR = 020000 // Character special file - c_ISSOCK = 0140000 // Socket -) - -// FileInfoHeader creates a partially-populated Header from fi. -// If fi describes a symlink, FileInfoHeader records link as the link target. -// If fi describes a directory, a slash is appended to the name. -// -// Since os.FileInfo's Name method only returns the base name of -// the file it describes, it may be necessary to modify Header.Name -// to provide the full path name of the file. 
-func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { - if fi == nil { - return nil, errors.New("archive/tar: FileInfo is nil") - } - fm := fi.Mode() - h := &Header{ - Name: fi.Name(), - ModTime: fi.ModTime(), - Mode: int64(fm.Perm()), // or'd with c_IS* constants later - } - switch { - case fm.IsRegular(): - h.Typeflag = TypeReg - h.Size = fi.Size() - case fi.IsDir(): - h.Typeflag = TypeDir - h.Name += "/" - case fm&os.ModeSymlink != 0: - h.Typeflag = TypeSymlink - h.Linkname = link - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - h.Typeflag = TypeChar - } else { - h.Typeflag = TypeBlock - } - case fm&os.ModeNamedPipe != 0: - h.Typeflag = TypeFifo - case fm&os.ModeSocket != 0: - return nil, fmt.Errorf("archive/tar: sockets not supported") - default: - return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) - } - if fm&os.ModeSetuid != 0 { - h.Mode |= c_ISUID - } - if fm&os.ModeSetgid != 0 { - h.Mode |= c_ISGID - } - if fm&os.ModeSticky != 0 { - h.Mode |= c_ISVTX - } - // If possible, populate additional fields from OS-specific - // FileInfo fields. - if sys, ok := fi.Sys().(*Header); ok { - // This FileInfo came from a Header (not the OS). Use the - // original Header to populate all remaining fields. - h.Uid = sys.Uid - h.Gid = sys.Gid - h.Uname = sys.Uname - h.Gname = sys.Gname - h.AccessTime = sys.AccessTime - h.ChangeTime = sys.ChangeTime - if sys.Xattrs != nil { - h.Xattrs = make(map[string]string) - for k, v := range sys.Xattrs { - h.Xattrs[k] = v - } - } - if sys.Typeflag == TypeLink { - // hard link - h.Typeflag = TypeLink - h.Size = 0 - h.Linkname = sys.Linkname - } - if sys.PAXRecords != nil { - h.PAXRecords = make(map[string]string) - for k, v := range sys.PAXRecords { - h.PAXRecords[k] = v - } - } - } - if sysStat != nil { - return h, sysStat(fi, h) - } - return h, nil -} - -// isHeaderOnlyType checks if the given type flag is of the type that has no -// data section even if a size is specified. -func isHeaderOnlyType(flag byte) bool { - switch flag { - case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: - return true - default: - return false - } -} - -func min(a, b int64) int64 { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/format.go b/vendor/github.com/vbatts/tar-split/archive/tar/format.go deleted file mode 100644 index 1f89d0c59a..0000000000 --- a/vendor/github.com/vbatts/tar-split/archive/tar/format.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -import "strings" - -// Format represents the tar archive format. -// -// The original tar format was introduced in Unix V7. -// Since then, there have been multiple competing formats attempting to -// standardize or extend the V7 format to overcome its limitations. -// The most common formats are the USTAR, PAX, and GNU formats, -// each with their own advantages and limitations. 
-// -// The following table captures the capabilities of each format: -// -// | USTAR | PAX | GNU -// ------------------+--------+-----------+---------- -// Name | 256B | unlimited | unlimited -// Linkname | 100B | unlimited | unlimited -// Size | uint33 | unlimited | uint89 -// Mode | uint21 | uint21 | uint57 -// Uid/Gid | uint21 | unlimited | uint57 -// Uname/Gname | 32B | unlimited | 32B -// ModTime | uint33 | unlimited | int89 -// AccessTime | n/a | unlimited | int89 -// ChangeTime | n/a | unlimited | int89 -// Devmajor/Devminor | uint21 | uint21 | uint57 -// ------------------+--------+-----------+---------- -// string encoding | ASCII | UTF-8 | binary -// sub-second times | no | yes | no -// sparse files | no | yes | yes -// -// The table's upper portion shows the Header fields, where each format reports -// the maximum number of bytes allowed for each string field and -// the integer type used to store each numeric field -// (where timestamps are stored as the number of seconds since the Unix epoch). -// -// The table's lower portion shows specialized features of each format, -// such as supported string encodings, support for sub-second timestamps, -// or support for sparse files. -// -// The Writer currently provides no support for sparse files. -type Format int - -// Constants to identify various tar formats. -const ( - // Deliberately hide the meaning of constants from public API. - _ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc... - - // FormatUnknown indicates that the format is unknown. - FormatUnknown - - // The format of the original Unix V7 tar tool prior to standardization. - formatV7 - - // FormatUSTAR represents the USTAR header format defined in POSIX.1-1988. - // - // While this format is compatible with most tar readers, - // the format has several limitations making it unsuitable for some usages. - // Most notably, it cannot support sparse files, files larger than 8GiB, - // filenames larger than 256 characters, and non-ASCII filenames. - // - // Reference: - // http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 - FormatUSTAR - - // FormatPAX represents the PAX header format defined in POSIX.1-2001. - // - // PAX extends USTAR by writing a special file with Typeflag TypeXHeader - // preceding the original header. This file contains a set of key-value - // records, which are used to overcome USTAR's shortcomings, in addition to - // providing the ability to have sub-second resolution for timestamps. - // - // Some newer formats add their own extensions to PAX by defining their - // own keys and assigning certain semantic meaning to the associated values. - // For example, sparse file support in PAX is implemented using keys - // defined by the GNU manual (e.g., "GNU.sparse.map"). - // - // Reference: - // http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html - FormatPAX - - // FormatGNU represents the GNU header format. - // - // The GNU header format is older than the USTAR and PAX standards and - // is not compatible with them. The GNU format supports - // arbitrary file sizes, filenames of arbitrary encoding and length, - // sparse files, and other features. - // - // It is recommended that PAX be chosen over GNU unless the target - // application can only parse GNU formatted archives. - // - // Reference: - // https://www.gnu.org/software/tar/manual/html_node/Standard.html - FormatGNU - - // Schily's tar format, which is incompatible with USTAR. 
- // This does not cover STAR extensions to the PAX format; these fall under - // the PAX format. - formatSTAR - - formatMax -) - -func (f Format) has(f2 Format) bool { return f&f2 != 0 } -func (f *Format) mayBe(f2 Format) { *f |= f2 } -func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 } -func (f *Format) mustNotBe(f2 Format) { *f &^= f2 } - -var formatNames = map[Format]string{ - formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR", -} - -func (f Format) String() string { - var ss []string - for f2 := Format(1); f2 < formatMax; f2 <<= 1 { - if f.has(f2) { - ss = append(ss, formatNames[f2]) - } - } - switch len(ss) { - case 0: - return "" - case 1: - return ss[0] - default: - return "(" + strings.Join(ss, " | ") + ")" - } -} - -// Magics used to identify various formats. -const ( - magicGNU, versionGNU = "ustar ", " \x00" - magicUSTAR, versionUSTAR = "ustar\x00", "00" - trailerSTAR = "tar\x00" -) - -// Size constants from various tar specifications. -const ( - blockSize = 512 // Size of each block in a tar stream - nameSize = 100 // Max length of the name field in USTAR format - prefixSize = 155 // Max length of the prefix field in USTAR format -) - -// blockPadding computes the number of bytes needed to pad offset up to the -// nearest block edge where 0 <= n < blockSize. -func blockPadding(offset int64) (n int64) { - return -offset & (blockSize - 1) -} - -var zeroBlock block - -type block [blockSize]byte - -// Convert block to any number of formats. -func (b *block) V7() *headerV7 { return (*headerV7)(b) } -func (b *block) GNU() *headerGNU { return (*headerGNU)(b) } -func (b *block) STAR() *headerSTAR { return (*headerSTAR)(b) } -func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) } -func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) } - -// GetFormat checks that the block is a valid tar header based on the checksum. -// It then attempts to guess the specific format based on magic values. -// If the checksum fails, then FormatUnknown is returned. -func (b *block) GetFormat() Format { - // Verify checksum. - var p parser - value := p.parseOctal(b.V7().Chksum()) - chksum1, chksum2 := b.ComputeChecksum() - if p.err != nil || (value != chksum1 && value != chksum2) { - return FormatUnknown - } - - // Guess the magic values. - magic := string(b.USTAR().Magic()) - version := string(b.USTAR().Version()) - trailer := string(b.STAR().Trailer()) - switch { - case magic == magicUSTAR && trailer == trailerSTAR: - return formatSTAR - case magic == magicUSTAR: - return FormatUSTAR | FormatPAX - case magic == magicGNU && version == versionGNU: - return FormatGNU - default: - return formatV7 - } -} - -// SetFormat writes the magic values necessary for specified format -// and then updates the checksum accordingly. -func (b *block) SetFormat(format Format) { - // Set the magic values. - switch { - case format.has(formatV7): - // Do nothing. - case format.has(FormatGNU): - copy(b.GNU().Magic(), magicGNU) - copy(b.GNU().Version(), versionGNU) - case format.has(formatSTAR): - copy(b.STAR().Magic(), magicUSTAR) - copy(b.STAR().Version(), versionUSTAR) - copy(b.STAR().Trailer(), trailerSTAR) - case format.has(FormatUSTAR | FormatPAX): - copy(b.USTAR().Magic(), magicUSTAR) - copy(b.USTAR().Version(), versionUSTAR) - default: - panic("invalid format") - } - - // Update checksum. - // This field is special in that it is terminated by a NULL then space. 
- var f formatter - field := b.V7().Chksum() - chksum, _ := b.ComputeChecksum() // Possible values are 256..128776 - f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143 - field[7] = ' ' -} - -// ComputeChecksum computes the checksum for the header block. -// POSIX specifies a sum of the unsigned byte values, but the Sun tar used -// signed byte values. -// We compute and return both. -func (b *block) ComputeChecksum() (unsigned, signed int64) { - for i, c := range b { - if 148 <= i && i < 156 { - c = ' ' // Treat the checksum field itself as all spaces. - } - unsigned += int64(c) - signed += int64(int8(c)) - } - return unsigned, signed -} - -// Reset clears the block with all zeros. -func (b *block) Reset() { - *b = block{} -} - -type headerV7 [blockSize]byte - -func (h *headerV7) Name() []byte { return h[000:][:100] } -func (h *headerV7) Mode() []byte { return h[100:][:8] } -func (h *headerV7) UID() []byte { return h[108:][:8] } -func (h *headerV7) GID() []byte { return h[116:][:8] } -func (h *headerV7) Size() []byte { return h[124:][:12] } -func (h *headerV7) ModTime() []byte { return h[136:][:12] } -func (h *headerV7) Chksum() []byte { return h[148:][:8] } -func (h *headerV7) TypeFlag() []byte { return h[156:][:1] } -func (h *headerV7) LinkName() []byte { return h[157:][:100] } - -type headerGNU [blockSize]byte - -func (h *headerGNU) V7() *headerV7 { return (*headerV7)(h) } -func (h *headerGNU) Magic() []byte { return h[257:][:6] } -func (h *headerGNU) Version() []byte { return h[263:][:2] } -func (h *headerGNU) UserName() []byte { return h[265:][:32] } -func (h *headerGNU) GroupName() []byte { return h[297:][:32] } -func (h *headerGNU) DevMajor() []byte { return h[329:][:8] } -func (h *headerGNU) DevMinor() []byte { return h[337:][:8] } -func (h *headerGNU) AccessTime() []byte { return h[345:][:12] } -func (h *headerGNU) ChangeTime() []byte { return h[357:][:12] } -func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) } -func (h *headerGNU) RealSize() []byte { return h[483:][:12] } - -type headerSTAR [blockSize]byte - -func (h *headerSTAR) V7() *headerV7 { return (*headerV7)(h) } -func (h *headerSTAR) Magic() []byte { return h[257:][:6] } -func (h *headerSTAR) Version() []byte { return h[263:][:2] } -func (h *headerSTAR) UserName() []byte { return h[265:][:32] } -func (h *headerSTAR) GroupName() []byte { return h[297:][:32] } -func (h *headerSTAR) DevMajor() []byte { return h[329:][:8] } -func (h *headerSTAR) DevMinor() []byte { return h[337:][:8] } -func (h *headerSTAR) Prefix() []byte { return h[345:][:131] } -func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] } -func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] } -func (h *headerSTAR) Trailer() []byte { return h[508:][:4] } - -type headerUSTAR [blockSize]byte - -func (h *headerUSTAR) V7() *headerV7 { return (*headerV7)(h) } -func (h *headerUSTAR) Magic() []byte { return h[257:][:6] } -func (h *headerUSTAR) Version() []byte { return h[263:][:2] } -func (h *headerUSTAR) UserName() []byte { return h[265:][:32] } -func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] } -func (h *headerUSTAR) DevMajor() []byte { return h[329:][:8] } -func (h *headerUSTAR) DevMinor() []byte { return h[337:][:8] } -func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] } - -type sparseArray []byte - -func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) } -func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] } -func (s 
sparseArray) MaxEntries() int { return len(s) / 24 } - -type sparseElem []byte - -func (s sparseElem) Offset() []byte { return s[00:][:12] } -func (s sparseElem) Length() []byte { return s[12:][:12] } diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go deleted file mode 100644 index af006fc92e..0000000000 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ /dev/null @@ -1,924 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -import ( - "bytes" - "io" - "strconv" - "strings" - "time" -) - -// Reader provides sequential access to the contents of a tar archive. -// Reader.Next advances to the next file in the archive (including the first), -// and then Reader can be treated as an io.Reader to access the file's data. -type Reader struct { - r io.Reader - pad int64 // Amount of padding (ignored) after current file entry - curr fileReader // Reader for current file entry - blk block // Buffer to use as temporary local storage - - // err is a persistent error. - // It is only the responsibility of every exported method of Reader to - // ensure that this error is sticky. - err error - - RawAccounting bool // Whether to enable the access needed to reassemble the tar from raw bytes. Some performance/memory hit for this. - rawBytes *bytes.Buffer // last raw bits -} - -type fileReader interface { - io.Reader - fileState - - WriteTo(io.Writer) (int64, error) -} - -// RawBytes accesses the raw bytes of the archive, apart from the file payload itself. -// This includes the header and padding. -// -// # This call resets the current rawbytes buffer -// -// Only when RawAccounting is enabled, otherwise this returns nil -func (tr *Reader) RawBytes() []byte { - if !tr.RawAccounting { - return nil - } - if tr.rawBytes == nil { - tr.rawBytes = bytes.NewBuffer(nil) - } - defer tr.rawBytes.Reset() // if we've read them, then flush them. - - return tr.rawBytes.Bytes() - -} - -// NewReader creates a new Reader reading from r. -func NewReader(r io.Reader) *Reader { - return &Reader{r: r, curr: ®FileReader{r, 0}} -} - -// Next advances to the next entry in the tar archive. -// The Header.Size determines how many bytes can be read for the next file. -// Any remaining data in the current file is automatically discarded. -// -// io.EOF is returned at the end of the input. -func (tr *Reader) Next() (*Header, error) { - if tr.err != nil { - return nil, tr.err - } - hdr, err := tr.next() - tr.err = err - return hdr, err -} - -func (tr *Reader) next() (*Header, error) { - var paxHdrs map[string]string - var gnuLongName, gnuLongLink string - - if tr.RawAccounting { - if tr.rawBytes == nil { - tr.rawBytes = bytes.NewBuffer(nil) - } else { - tr.rawBytes.Reset() - } - } - - // Externally, Next iterates through the tar archive as if it is a series of - // files. Internally, the tar format often uses fake "files" to add meta - // data that describes the next file. These meta data "files" should not - // normally be visible to the outside. As such, this loop iterates through - // one or more "header files" until it finds a "normal file". - format := FormatUSTAR | FormatPAX | FormatGNU - for { - // Discard the remainder of the file and any padding. 
- if err := discard(tr, tr.curr.PhysicalRemaining()); err != nil { - return nil, err - } - n, err := tryReadFull(tr.r, tr.blk[:tr.pad]) - if err != nil { - return nil, err - } - if tr.RawAccounting { - tr.rawBytes.Write(tr.blk[:n]) - } - tr.pad = 0 - - hdr, rawHdr, err := tr.readHeader() - if err != nil { - return nil, err - } - if err := tr.handleRegularFile(hdr); err != nil { - return nil, err - } - format.mayOnlyBe(hdr.Format) - - // Check for PAX/GNU special headers and files. - switch hdr.Typeflag { - case TypeXHeader, TypeXGlobalHeader: - format.mayOnlyBe(FormatPAX) - paxHdrs, err = parsePAX(tr) - if err != nil { - return nil, err - } - if hdr.Typeflag == TypeXGlobalHeader { - if err = mergePAX(hdr, paxHdrs); err != nil { - return nil, err - } - return &Header{ - Name: hdr.Name, - Typeflag: hdr.Typeflag, - Xattrs: hdr.Xattrs, - PAXRecords: hdr.PAXRecords, - Format: format, - }, nil - } - continue // This is a meta header affecting the next header - case TypeGNULongName, TypeGNULongLink: - format.mayOnlyBe(FormatGNU) - realname, err := io.ReadAll(tr) - if err != nil { - return nil, err - } - - if tr.RawAccounting { - tr.rawBytes.Write(realname) - } - - var p parser - switch hdr.Typeflag { - case TypeGNULongName: - gnuLongName = p.parseString(realname) - case TypeGNULongLink: - gnuLongLink = p.parseString(realname) - } - continue // This is a meta header affecting the next header - default: - // The old GNU sparse format is handled here since it is technically - // just a regular file with additional attributes. - - if err := mergePAX(hdr, paxHdrs); err != nil { - return nil, err - } - if gnuLongName != "" { - hdr.Name = gnuLongName - } - if gnuLongLink != "" { - hdr.Linkname = gnuLongLink - } - if hdr.Typeflag == TypeRegA { - if strings.HasSuffix(hdr.Name, "/") { - hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories - } else { - hdr.Typeflag = TypeReg - } - } - - // The extended headers may have updated the size. - // Thus, setup the regFileReader again after merging PAX headers. - if err := tr.handleRegularFile(hdr); err != nil { - return nil, err - } - - // Sparse formats rely on being able to read from the logical data - // section; there must be a preceding call to handleRegularFile. - if err := tr.handleSparseFile(hdr, rawHdr); err != nil { - return nil, err - } - - // Set the final guess at the format. - if format.has(FormatUSTAR) && format.has(FormatPAX) { - format.mayOnlyBe(FormatUSTAR) - } - hdr.Format = format - return hdr, nil // This is a file, so stop - } - } -} - -// handleRegularFile sets up the current file reader and padding such that it -// can only read the following logical data section. It will properly handle -// special headers that contain no data section. -func (tr *Reader) handleRegularFile(hdr *Header) error { - nb := hdr.Size - if isHeaderOnlyType(hdr.Typeflag) { - nb = 0 - } - if nb < 0 { - return ErrHeader - } - - tr.pad = blockPadding(nb) - tr.curr = ®FileReader{r: tr.r, nb: nb} - return nil -} - -// handleSparseFile checks if the current file is a sparse format of any type -// and sets the curr reader appropriately. -func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error { - var spd sparseDatas - var err error - if hdr.Typeflag == TypeGNUSparse { - spd, err = tr.readOldGNUSparseMap(hdr, rawHdr) - } else { - spd, err = tr.readGNUSparsePAXHeaders(hdr) - } - - // If sp is non-nil, then this is a sparse file. - // Note that it is possible for len(sp) == 0. 
- if err == nil && spd != nil { - if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) { - return ErrHeader - } - sph := invertSparseEntries(spd, hdr.Size) - tr.curr = &sparseFileReader{tr.curr, sph, 0} - } - return err -} - -// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. -// If they are found, then this function reads the sparse map and returns it. -// This assumes that 0.0 headers have already been converted to 0.1 headers -// by the PAX header parsing logic. -func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) { - // Identify the version of GNU headers. - var is1x0 bool - major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor] - switch { - case major == "0" && (minor == "0" || minor == "1"): - is1x0 = false - case major == "1" && minor == "0": - is1x0 = true - case major != "" || minor != "": - return nil, nil // Unknown GNU sparse PAX version - case hdr.PAXRecords[paxGNUSparseMap] != "": - is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess - default: - return nil, nil // Not a PAX format GNU sparse file. - } - hdr.Format.mayOnlyBe(FormatPAX) - - // Update hdr from GNU sparse PAX headers. - if name := hdr.PAXRecords[paxGNUSparseName]; name != "" { - hdr.Name = name - } - size := hdr.PAXRecords[paxGNUSparseSize] - if size == "" { - size = hdr.PAXRecords[paxGNUSparseRealSize] - } - if size != "" { - n, err := strconv.ParseInt(size, 10, 64) - if err != nil { - return nil, ErrHeader - } - hdr.Size = n - } - - // Read the sparse map according to the appropriate format. - if is1x0 { - return readGNUSparseMap1x0(tr.curr) - } - return readGNUSparseMap0x1(hdr.PAXRecords) -} - -// mergePAX merges paxHdrs into hdr for all relevant fields of Header. -func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) { - for k, v := range paxHdrs { - if v == "" { - continue // Keep the original USTAR value - } - var id64 int64 - switch k { - case paxPath: - hdr.Name = v - case paxLinkpath: - hdr.Linkname = v - case paxUname: - hdr.Uname = v - case paxGname: - hdr.Gname = v - case paxUid: - id64, err = strconv.ParseInt(v, 10, 64) - hdr.Uid = int(id64) // Integer overflow possible - case paxGid: - id64, err = strconv.ParseInt(v, 10, 64) - hdr.Gid = int(id64) // Integer overflow possible - case paxAtime: - hdr.AccessTime, err = parsePAXTime(v) - case paxMtime: - hdr.ModTime, err = parsePAXTime(v) - case paxCtime: - hdr.ChangeTime, err = parsePAXTime(v) - case paxSize: - hdr.Size, err = strconv.ParseInt(v, 10, 64) - default: - if strings.HasPrefix(k, paxSchilyXattr) { - if hdr.Xattrs == nil { - hdr.Xattrs = make(map[string]string) - } - hdr.Xattrs[k[len(paxSchilyXattr):]] = v - } - } - if err != nil { - return ErrHeader - } - } - hdr.PAXRecords = paxHdrs - return nil -} - -// parsePAX parses PAX headers. -// If an extended header (type 'x') is invalid, ErrHeader is returned -func parsePAX(r io.Reader) (map[string]string, error) { - buf, err := io.ReadAll(r) - if err != nil { - return nil, err - } - // leaving this function for io.Reader makes it more testable - if tr, ok := r.(*Reader); ok && tr.RawAccounting { - if _, err = tr.rawBytes.Write(buf); err != nil { - return nil, err - } - } - sbuf := string(buf) - - // For GNU PAX sparse format 0.0 support. - // This function transforms the sparse format 0.0 headers into format 0.1 - // headers since 0.0 headers were not PAX compliant. 
- var sparseMap []string - - paxHdrs := make(map[string]string) - for len(sbuf) > 0 { - key, value, residual, err := parsePAXRecord(sbuf) - if err != nil { - return nil, ErrHeader - } - sbuf = residual - - switch key { - case paxGNUSparseOffset, paxGNUSparseNumBytes: - // Validate sparse header order and value. - if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) || - (len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) || - strings.Contains(value, ",") { - return nil, ErrHeader - } - sparseMap = append(sparseMap, value) - default: - paxHdrs[key] = value - } - } - if len(sparseMap) > 0 { - paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",") - } - return paxHdrs, nil -} - -// readHeader reads the next block header and assumes that the underlying reader -// is already aligned to a block boundary. It returns the raw block of the -// header in case further processing is required. -// -// The err will be set to io.EOF only when one of the following occurs: -// - Exactly 0 bytes are read and EOF is hit. -// - Exactly 1 block of zeros is read and EOF is hit. -// - At least 2 blocks of zeros are read. -func (tr *Reader) readHeader() (*Header, *block, error) { - // Two blocks of zero bytes marks the end of the archive. - n, err := io.ReadFull(tr.r, tr.blk[:]) - if tr.RawAccounting && (err == nil || err == io.EOF) { - tr.rawBytes.Write(tr.blk[:n]) - } - if err != nil { - return nil, nil, err // EOF is okay here; exactly 0 bytes read - } - - if bytes.Equal(tr.blk[:], zeroBlock[:]) { - n, err = io.ReadFull(tr.r, tr.blk[:]) - if tr.RawAccounting && (err == nil || err == io.EOF) { - tr.rawBytes.Write(tr.blk[:n]) - } - if err != nil { - return nil, nil, err // EOF is okay here; exactly 1 block of zeros read - } - if bytes.Equal(tr.blk[:], zeroBlock[:]) { - return nil, nil, io.EOF // normal EOF; exactly 2 block of zeros read - } - return nil, nil, ErrHeader // Zero block and then non-zero block - } - - // Verify the header matches a known format. - format := tr.blk.GetFormat() - if format == FormatUnknown { - return nil, nil, ErrHeader - } - - var p parser - hdr := new(Header) - - // Unpack the V7 header. - v7 := tr.blk.V7() - hdr.Typeflag = v7.TypeFlag()[0] - hdr.Name = p.parseString(v7.Name()) - hdr.Linkname = p.parseString(v7.LinkName()) - hdr.Size = p.parseNumeric(v7.Size()) - hdr.Mode = p.parseNumeric(v7.Mode()) - hdr.Uid = int(p.parseNumeric(v7.UID())) - hdr.Gid = int(p.parseNumeric(v7.GID())) - hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0) - - // Unpack format specific fields. - if format > formatV7 { - ustar := tr.blk.USTAR() - hdr.Uname = p.parseString(ustar.UserName()) - hdr.Gname = p.parseString(ustar.GroupName()) - hdr.Devmajor = p.parseNumeric(ustar.DevMajor()) - hdr.Devminor = p.parseNumeric(ustar.DevMinor()) - - var prefix string - switch { - case format.has(FormatUSTAR | FormatPAX): - hdr.Format = format - ustar := tr.blk.USTAR() - prefix = p.parseString(ustar.Prefix()) - - // For Format detection, check if block is properly formatted since - // the parser is more liberal than what USTAR actually permits. - notASCII := func(r rune) bool { return r >= 0x80 } - if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 { - hdr.Format = FormatUnknown // Non-ASCII characters in block. 
- } - nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 } - if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) && - nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) { - hdr.Format = FormatUnknown // Numeric fields must end in NUL - } - case format.has(formatSTAR): - star := tr.blk.STAR() - prefix = p.parseString(star.Prefix()) - hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0) - hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0) - case format.has(FormatGNU): - hdr.Format = format - var p2 parser - gnu := tr.blk.GNU() - if b := gnu.AccessTime(); b[0] != 0 { - hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0) - } - if b := gnu.ChangeTime(); b[0] != 0 { - hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0) - } - - // Prior to Go1.8, the Writer had a bug where it would output - // an invalid tar file in certain rare situations because the logic - // incorrectly believed that the old GNU format had a prefix field. - // This is wrong and leads to an output file that mangles the - // atime and ctime fields, which are often left unused. - // - // In order to continue reading tar files created by former, buggy - // versions of Go, we skeptically parse the atime and ctime fields. - // If we are unable to parse them and the prefix field looks like - // an ASCII string, then we fallback on the pre-Go1.8 behavior - // of treating these fields as the USTAR prefix field. - // - // Note that this will not use the fallback logic for all possible - // files generated by a pre-Go1.8 toolchain. If the generated file - // happened to have a prefix field that parses as valid - // atime and ctime fields (e.g., when they are valid octal strings), - // then it is impossible to distinguish between an valid GNU file - // and an invalid pre-Go1.8 file. - // - // See https://golang.org/issues/12594 - // See https://golang.org/issues/21005 - if p2.err != nil { - hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{} - ustar := tr.blk.USTAR() - if s := p.parseString(ustar.Prefix()); isASCII(s) { - prefix = s - } - hdr.Format = FormatUnknown // Buggy file is not GNU - } - } - if len(prefix) > 0 { - hdr.Name = prefix + "/" + hdr.Name - } - } - return hdr, &tr.blk, p.err -} - -// readOldGNUSparseMap reads the sparse map from the old GNU sparse format. -// The sparse map is stored in the tar header if it's small enough. -// If it's larger than four entries, then one or more extension headers are used -// to store the rest of the sparse map. -// -// The Header.Size does not reflect the size of any extended headers used. -// Thus, this function will read from the raw io.Reader to fetch extra headers. -// This method mutates blk in the process. -func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) { - // Make sure that the input format is GNU. - // Unfortunately, the STAR format also has a sparse header format that uses - // the same type flag but has a completely different layout. - if blk.GetFormat() != FormatGNU { - return nil, ErrHeader - } - hdr.Format.mayOnlyBe(FormatGNU) - - var p parser - hdr.Size = p.parseNumeric(blk.GNU().RealSize()) - if p.err != nil { - return nil, p.err - } - s := blk.GNU().Sparse() - spd := make(sparseDatas, 0, s.MaxEntries()) - for { - for i := 0; i < s.MaxEntries(); i++ { - // This termination condition is identical to GNU and BSD tar. 
- if s.Entry(i).Offset()[0] == 0x00 { - break // Don't return, need to process extended headers (even if empty) - } - offset := p.parseNumeric(s.Entry(i).Offset()) - length := p.parseNumeric(s.Entry(i).Length()) - if p.err != nil { - return nil, p.err - } - spd = append(spd, sparseEntry{Offset: offset, Length: length}) - } - - if s.IsExtended()[0] > 0 { - // There are more entries. Read an extension header and parse its entries. - if _, err := mustReadFull(tr.r, blk[:]); err != nil { - return nil, err - } - if tr.RawAccounting { - tr.rawBytes.Write(blk[:]) - } - s = blk.Sparse() - continue - } - return spd, nil // Done - } -} - -// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format -// version 1.0. The format of the sparse map consists of a series of -// newline-terminated numeric fields. The first field is the number of entries -// and is always present. Following this are the entries, consisting of two -// fields (offset, length). This function must stop reading at the end -// boundary of the block containing the last newline. -// -// Note that the GNU manual says that numeric values should be encoded in octal -// format. However, the GNU tar utility itself outputs these values in decimal. -// As such, this library treats values as being encoded in decimal. -func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { - var ( - cntNewline int64 - buf bytes.Buffer - blk block - ) - - // feedTokens copies data in blocks from r into buf until there are - // at least cnt newlines in buf. It will not read more blocks than needed. - feedTokens := func(n int64) error { - for cntNewline < n { - if _, err := mustReadFull(r, blk[:]); err != nil { - return err - } - buf.Write(blk[:]) - for _, c := range blk { - if c == '\n' { - cntNewline++ - } - } - } - return nil - } - - // nextToken gets the next token delimited by a newline. This assumes that - // at least one newline exists in the buffer. - nextToken := func() string { - cntNewline-- - tok, _ := buf.ReadString('\n') - return strings.TrimRight(tok, "\n") - } - - // Parse for the number of entries. - // Use integer overflow resistant math to check this. - if err := feedTokens(1); err != nil { - return nil, err - } - numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int - if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { - return nil, ErrHeader - } - - // Parse for all member entries. - // numEntries is trusted after this since a potential attacker must have - // committed resources proportional to what this library used. - if err := feedTokens(2 * numEntries); err != nil { - return nil, err - } - spd := make(sparseDatas, 0, numEntries) - for i := int64(0); i < numEntries; i++ { - offset, err1 := strconv.ParseInt(nextToken(), 10, 64) - length, err2 := strconv.ParseInt(nextToken(), 10, 64) - if err1 != nil || err2 != nil { - return nil, ErrHeader - } - spd = append(spd, sparseEntry{Offset: offset, Length: length}) - } - return spd, nil -} - -// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format -// version 0.1. The sparse map is stored in the PAX headers. -func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) { - // Get number of entries. - // Use integer overflow resistant math to check this. 
- numEntriesStr := paxHdrs[paxGNUSparseNumBlocks] - numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int - if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { - return nil, ErrHeader - } - - // There should be two numbers in sparseMap for each entry. - sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",") - if len(sparseMap) == 1 && sparseMap[0] == "" { - sparseMap = sparseMap[:0] - } - if int64(len(sparseMap)) != 2*numEntries { - return nil, ErrHeader - } - - // Loop through the entries in the sparse map. - // numEntries is trusted now. - spd := make(sparseDatas, 0, numEntries) - for len(sparseMap) >= 2 { - offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64) - length, err2 := strconv.ParseInt(sparseMap[1], 10, 64) - if err1 != nil || err2 != nil { - return nil, ErrHeader - } - spd = append(spd, sparseEntry{Offset: offset, Length: length}) - sparseMap = sparseMap[2:] - } - return spd, nil -} - -// Read reads from the current file in the tar archive. -// It returns (0, io.EOF) when it reaches the end of that file, -// until Next is called to advance to the next file. -// -// If the current file is sparse, then the regions marked as a hole -// are read back as NUL-bytes. -// -// Calling Read on special types like TypeLink, TypeSymlink, TypeChar, -// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what -// the Header.Size claims. -func (tr *Reader) Read(b []byte) (int, error) { - if tr.err != nil { - return 0, tr.err - } - n, err := tr.curr.Read(b) - if err != nil && err != io.EOF { - tr.err = err - } - return n, err -} - -// writeTo writes the content of the current file to w. -// The bytes written matches the number of remaining bytes in the current file. -// -// If the current file is sparse and w is an io.WriteSeeker, -// then writeTo uses Seek to skip past holes defined in Header.SparseHoles, -// assuming that skipped regions are filled with NULs. -// This always writes the last byte to ensure w is the right size. -// -// TODO(dsnet): Re-export this when adding sparse file support. -// See https://golang.org/issue/22735 -func (tr *Reader) writeTo(w io.Writer) (int64, error) { - if tr.err != nil { - return 0, tr.err - } - n, err := tr.curr.WriteTo(w) - if err != nil { - tr.err = err - } - return n, err -} - -// regFileReader is a fileReader for reading data from a regular file entry. -type regFileReader struct { - r io.Reader // Underlying Reader - nb int64 // Number of remaining bytes to read -} - -func (fr *regFileReader) Read(b []byte) (n int, err error) { - if int64(len(b)) > fr.nb { - b = b[:fr.nb] - } - if len(b) > 0 { - n, err = fr.r.Read(b) - fr.nb -= int64(n) - } - switch { - case err == io.EOF && fr.nb > 0: - return n, io.ErrUnexpectedEOF - case err == nil && fr.nb == 0: - return n, io.EOF - default: - return n, err - } -} - -func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) { - return io.Copy(w, struct{ io.Reader }{fr}) -} - -func (fr regFileReader) LogicalRemaining() int64 { - return fr.nb -} - -func (fr regFileReader) PhysicalRemaining() int64 { - return fr.nb -} - -// sparseFileReader is a fileReader for reading data from a sparse file entry. 
-type sparseFileReader struct { - fr fileReader // Underlying fileReader - sp sparseHoles // Normalized list of sparse holes - pos int64 // Current position in sparse file -} - -func (sr *sparseFileReader) Read(b []byte) (n int, err error) { - finished := int64(len(b)) >= sr.LogicalRemaining() - if finished { - b = b[:sr.LogicalRemaining()] - } - - b0 := b - endPos := sr.pos + int64(len(b)) - for endPos > sr.pos && err == nil { - var nf int // Bytes read in fragment - holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() - if sr.pos < holeStart { // In a data fragment - bf := b[:min(int64(len(b)), holeStart-sr.pos)] - nf, err = tryReadFull(sr.fr, bf) - } else { // In a hole fragment - bf := b[:min(int64(len(b)), holeEnd-sr.pos)] - nf, err = tryReadFull(zeroReader{}, bf) - } - b = b[nf:] - sr.pos += int64(nf) - if sr.pos >= holeEnd && len(sr.sp) > 1 { - sr.sp = sr.sp[1:] // Ensure last fragment always remains - } - } - - n = len(b0) - len(b) - switch { - case err == io.EOF: - return n, errMissData // Less data in dense file than sparse file - case err != nil: - return n, err - case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: - return n, errUnrefData // More data in dense file than sparse file - case finished: - return n, io.EOF - default: - return n, nil - } -} - -func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) { - ws, ok := w.(io.WriteSeeker) - if ok { - if _, err := ws.Seek(0, io.SeekCurrent); err != nil { - ok = false // Not all io.Seeker can really seek - } - } - if !ok { - return io.Copy(w, struct{ io.Reader }{sr}) - } - - var writeLastByte bool - pos0 := sr.pos - for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil { - var nf int64 // Size of fragment - holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() - if sr.pos < holeStart { // In a data fragment - nf = holeStart - sr.pos - nf, err = io.CopyN(ws, sr.fr, nf) - } else { // In a hole fragment - nf = holeEnd - sr.pos - if sr.PhysicalRemaining() == 0 { - writeLastByte = true - nf-- - } - _, err = ws.Seek(nf, io.SeekCurrent) - } - sr.pos += nf - if sr.pos >= holeEnd && len(sr.sp) > 1 { - sr.sp = sr.sp[1:] // Ensure last fragment always remains - } - } - - // If the last fragment is a hole, then seek to 1-byte before EOF, and - // write a single byte to ensure the file is the right size. - if writeLastByte && err == nil { - _, err = ws.Write([]byte{0}) - sr.pos++ - } - - n = sr.pos - pos0 - switch { - case err == io.EOF: - return n, errMissData // Less data in dense file than sparse file - case err != nil: - return n, err - case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: - return n, errUnrefData // More data in dense file than sparse file - default: - return n, nil - } -} - -func (sr sparseFileReader) LogicalRemaining() int64 { - return sr.sp[len(sr.sp)-1].endOffset() - sr.pos -} -func (sr sparseFileReader) PhysicalRemaining() int64 { - return sr.fr.PhysicalRemaining() -} - -type zeroReader struct{} - -func (zeroReader) Read(b []byte) (int, error) { - for i := range b { - b[i] = 0 - } - return len(b), nil -} - -// mustReadFull is like io.ReadFull except it returns -// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read. -func mustReadFull(r io.Reader, b []byte) (int, error) { - n, err := tryReadFull(r, b) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return n, err -} - -// tryReadFull is like io.ReadFull except it returns -// io.EOF when it is hit before len(b) bytes are read. 
-func tryReadFull(r io.Reader, b []byte) (n int, err error) { - for len(b) > n && err == nil { - var nn int - nn, err = r.Read(b[n:]) - n += nn - } - if len(b) == n && err == io.EOF { - err = nil - } - return n, err -} - -// discard skips n bytes in r, reporting an error if unable to do so. -func discard(tr *Reader, n int64) error { - var seekSkipped, copySkipped int64 - var err error - r := tr.r - if tr.RawAccounting { - - copySkipped, err = io.CopyN(tr.rawBytes, tr.r, n) - goto out - } - - // If possible, Seek to the last byte before the end of the data section. - // Do this because Seek is often lazy about reporting errors; this will mask - // the fact that the stream may be truncated. We can rely on the - // io.CopyN done shortly afterwards to trigger any IO errors. - if sr, ok := r.(io.Seeker); ok && n > 1 { - // Not all io.Seeker can actually Seek. For example, os.Stdin implements - // io.Seeker, but calling Seek always returns an error and performs - // no action. Thus, we try an innocent seek to the current position - // to see if Seek is really supported. - pos1, err := sr.Seek(0, io.SeekCurrent) - if pos1 >= 0 && err == nil { - // Seek seems supported, so perform the real Seek. - pos2, err := sr.Seek(n-1, io.SeekCurrent) - if pos2 < 0 || err != nil { - return err - } - seekSkipped = pos2 - pos1 - } - } - - copySkipped, err = io.CopyN(io.Discard, r, n-seekSkipped) -out: - if err == io.EOF && seekSkipped+copySkipped < n { - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go deleted file mode 100644 index cf9cc79c59..0000000000 --- a/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux dragonfly openbsd solaris - -package tar - -import ( - "syscall" - "time" -) - -func statAtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Atim.Unix()) -} - -func statCtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Ctim.Unix()) -} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go deleted file mode 100644 index 6f17dbe307..0000000000 --- a/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin freebsd netbsd - -package tar - -import ( - "syscall" - "time" -) - -func statAtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Atimespec.Unix()) -} - -func statCtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Ctimespec.Unix()) -} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go deleted file mode 100644 index 868105f338..0000000000 --- a/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build linux darwin dragonfly freebsd openbsd netbsd solaris - -package tar - -import ( - "os" - "os/user" - "runtime" - "strconv" - "sync" - "syscall" -) - -func init() { - sysStat = statUnix -} - -// userMap and groupMap caches UID and GID lookups for performance reasons. -// The downside is that renaming uname or gname by the OS never takes effect. -var userMap, groupMap sync.Map // map[int]string - -func statUnix(fi os.FileInfo, h *Header) error { - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return nil - } - h.Uid = int(sys.Uid) - h.Gid = int(sys.Gid) - - // Best effort at populating Uname and Gname. - // The os/user functions may fail for any number of reasons - // (not implemented on that platform, cgo not enabled, etc). - if u, ok := userMap.Load(h.Uid); ok { - h.Uname = u.(string) - } else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil { - h.Uname = u.Username - userMap.Store(h.Uid, h.Uname) - } - if g, ok := groupMap.Load(h.Gid); ok { - h.Gname = g.(string) - } else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil { - h.Gname = g.Name - groupMap.Store(h.Gid, h.Gname) - } - - h.AccessTime = statAtime(sys) - h.ChangeTime = statCtime(sys) - - // Best effort at populating Devmajor and Devminor. - if h.Typeflag == TypeChar || h.Typeflag == TypeBlock { - dev := uint64(sys.Rdev) // May be int32 or uint32 - switch runtime.GOOS { - case "linux": - // Copied from golang.org/x/sys/unix/dev_linux.go. - major := uint32((dev & 0x00000000000fff00) >> 8) - major |= uint32((dev & 0xfffff00000000000) >> 32) - minor := uint32((dev & 0x00000000000000ff) >> 0) - minor |= uint32((dev & 0x00000ffffff00000) >> 12) - h.Devmajor, h.Devminor = int64(major), int64(minor) - case "darwin": - // Copied from golang.org/x/sys/unix/dev_darwin.go. - major := uint32((dev >> 24) & 0xff) - minor := uint32(dev & 0xffffff) - h.Devmajor, h.Devminor = int64(major), int64(minor) - case "dragonfly": - // Copied from golang.org/x/sys/unix/dev_dragonfly.go. - major := uint32((dev >> 8) & 0xff) - minor := uint32(dev & 0xffff00ff) - h.Devmajor, h.Devminor = int64(major), int64(minor) - case "freebsd": - // Copied from golang.org/x/sys/unix/dev_freebsd.go. - major := uint32((dev >> 8) & 0xff) - minor := uint32(dev & 0xffff00ff) - h.Devmajor, h.Devminor = int64(major), int64(minor) - case "netbsd": - // Copied from golang.org/x/sys/unix/dev_netbsd.go. - major := uint32((dev & 0x000fff00) >> 8) - minor := uint32((dev & 0x000000ff) >> 0) - minor |= uint32((dev & 0xfff00000) >> 12) - h.Devmajor, h.Devminor = int64(major), int64(minor) - case "openbsd": - // Copied from golang.org/x/sys/unix/dev_openbsd.go. - major := uint32((dev & 0x0000ff00) >> 8) - minor := uint32((dev & 0x000000ff) >> 0) - minor |= uint32((dev & 0xffff0000) >> 8) - h.Devmajor, h.Devminor = int64(major), int64(minor) - default: - // TODO: Implement solaris (see https://golang.org/issue/8106) - } - } - return nil -} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go b/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go deleted file mode 100644 index d144485a49..0000000000 --- a/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -import ( - "bytes" - "fmt" - "strconv" - "strings" - "time" -) - -// hasNUL reports whether the NUL character exists within s. 
-func hasNUL(s string) bool { - return strings.IndexByte(s, 0) >= 0 -} - -// isASCII reports whether the input is an ASCII C-style string. -func isASCII(s string) bool { - for _, c := range s { - if c >= 0x80 || c == 0x00 { - return false - } - } - return true -} - -// toASCII converts the input to an ASCII C-style string. -// This a best effort conversion, so invalid characters are dropped. -func toASCII(s string) string { - if isASCII(s) { - return s - } - b := make([]byte, 0, len(s)) - for _, c := range s { - if c < 0x80 && c != 0x00 { - b = append(b, byte(c)) - } - } - return string(b) -} - -type parser struct { - err error // Last error seen -} - -type formatter struct { - err error // Last error seen -} - -// parseString parses bytes as a NUL-terminated C-style string. -// If a NUL byte is not found then the whole slice is returned as a string. -func (*parser) parseString(b []byte) string { - if i := bytes.IndexByte(b, 0); i >= 0 { - return string(b[:i]) - } - return string(b) -} - -// formatString copies s into b, NUL-terminating if possible. -func (f *formatter) formatString(b []byte, s string) { - if len(s) > len(b) { - f.err = ErrFieldTooLong - } - copy(b, s) - if len(s) < len(b) { - b[len(s)] = 0 - } - - // Some buggy readers treat regular files with a trailing slash - // in the V7 path field as a directory even though the full path - // recorded elsewhere (e.g., via PAX record) contains no trailing slash. - if len(s) > len(b) && b[len(b)-1] == '/' { - n := len(strings.TrimRight(s[:len(b)], "/")) - b[n] = 0 // Replace trailing slash with NUL terminator - } -} - -// fitsInBase256 reports whether x can be encoded into n bytes using base-256 -// encoding. Unlike octal encoding, base-256 encoding does not require that the -// string ends with a NUL character. Thus, all n bytes are available for output. -// -// If operating in binary mode, this assumes strict GNU binary mode; which means -// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is -// equivalent to the sign bit in two's complement form. -func fitsInBase256(n int, x int64) bool { - binBits := uint(n-1) * 8 - return n >= 9 || (x >= -1<<binBits && x < 1<<binBits) -} - -// parseNumeric parses the input as being encoded in either base-256 or octal. -// This function may return negative numbers. -// If parsing fails or an integer overflow occurs, err will be set. -func (p *parser) parseNumeric(b []byte) int64 { - // Check for base-256 (binary) format first. - // If the first byte is 0x80 or 0xff, then it is a binary format. - if len(b) > 0 && b[0]&0x80 != 0 { - // Handling negative numbers relies on the following identity: - // -a-1 == ^a - // - // If the number is negative, we use an inversion mask to invert the - // data bytes and treat the value as an unsigned number. - var inv byte // 0x00 if positive or zero, 0xff if negative - if b[0]&0x40 != 0 { - inv = 0xff - } - - var x uint64 - for i, c := range b { - c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing - if i == 0 { - c &= 0x7f // Ignore signal bit in first byte - } - if (x >> 56) > 0 { - p.err = ErrHeader // Integer overflow - return 0 - } - x = x<<8 | uint64(c) - } - if (x >> 63) > 0 { - p.err = ErrHeader // Integer overflow - return 0 - } - if inv == 0xff { - return ^int64(x) - } - return int64(x) - } - - // Normal case is base-8 (octal) format. - return p.parseOctal(b) -} - -// formatNumeric encodes x into b using base-8 (octal) encoding if possible. -// Otherwise it will attempt to use base-256 (binary) encoding. 
-func (f *formatter) formatNumeric(b []byte, x int64) { - if fitsInOctal(len(b), x) { - f.formatOctal(b, x) - return - } - - if fitsInBase256(len(b), x) { - for i := len(b) - 1; i >= 0; i-- { - b[i] = byte(x) - x >>= 8 - } - b[0] |= 0x80 // Highest bit indicates binary format - return - } - - f.formatOctal(b, 0) // Last resort, just write zero - f.err = ErrFieldTooLong -} - -func (p *parser) parseOctal(b []byte) int64 { - // Because unused fields are filled with NULs, we need - // to skip leading NULs. Fields may also be padded with - // spaces or NULs. - // So we remove leading and trailing NULs and spaces to - // be sure. - b = bytes.Trim(b, " \x00") - - if len(b) == 0 { - return 0 - } - x, perr := strconv.ParseUint(p.parseString(b), 8, 64) - if perr != nil { - p.err = ErrHeader - } - return int64(x) -} - -func (f *formatter) formatOctal(b []byte, x int64) { - if !fitsInOctal(len(b), x) { - x = 0 // Last resort, just write zero - f.err = ErrFieldTooLong - } - - s := strconv.FormatInt(x, 8) - // Add leading zeros, but leave room for a NUL. - if n := len(b) - len(s) - 1; n > 0 { - s = strings.Repeat("0", n) + s - } - f.formatString(b, s) -} - -// fitsInOctal reports whether the integer x fits in a field n-bytes long -// using octal encoding with the appropriate NUL terminator. -func fitsInOctal(n int, x int64) bool { - octBits := uint(n-1) * 3 - return x >= 0 && (n >= 22 || x < 1<<octBits) -} - -// parsePAXTime takes a string of the form %d.%d as described in the PAX -// specification. Note that this implementation allows for negative timestamps, -// which is allowed for by the PAX specification, but not always portable. -func parsePAXTime(s string) (time.Time, error) { - const maxNanoSecondDigits = 9 - - // Split string into seconds and sub-seconds parts. - ss, sn := s, "" - if pos := strings.IndexByte(s, '.'); pos >= 0 { - ss, sn = s[:pos], s[pos+1:] - } - - // Parse the seconds. - secs, err := strconv.ParseInt(ss, 10, 64) - if err != nil { - return time.Time{}, ErrHeader - } - if len(sn) == 0 { - return time.Unix(secs, 0), nil // No sub-second values - } - - // Parse the nanoseconds. - if strings.Trim(sn, "0123456789") != "" { - return time.Time{}, ErrHeader - } - if len(sn) < maxNanoSecondDigits { - sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad - } else { - sn = sn[:maxNanoSecondDigits] // Right truncate - } - nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed - if len(ss) > 0 && ss[0] == '-' { - return time.Unix(secs, -1*nsecs), nil // Negative correction - } - return time.Unix(secs, nsecs), nil -} - -// formatPAXTime converts ts into a time of the form %d.%d as described in the -// PAX specification. This function is capable of negative timestamps. -func formatPAXTime(ts time.Time) (s string) { - secs, nsecs := ts.Unix(), ts.Nanosecond() - if nsecs == 0 { - return strconv.FormatInt(secs, 10) - } - - // If seconds is negative, then perform correction. - sign := "" - if secs < 0 { - sign = "-" // Remember sign - secs = -(secs + 1) // Add a second to secs - nsecs = -(nsecs - 1E9) // Take that second away from nsecs - } - return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0") -} - -// parsePAXRecord parses the input PAX record string into a key-value pair. -// If parsing is successful, it will slice off the currently read record and -// return the remainder as r. -func parsePAXRecord(s string) (k, v, r string, err error) { - // The size field ends at the first space. - sp := strings.IndexByte(s, ' ') - if sp == -1 { - return "", "", s, ErrHeader - } - - // Parse the first token as a decimal integer. - n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int - if perr != nil || n < 5 || int64(len(s)) < n { - return "", "", s, ErrHeader - } - - // Extract everything between the space and the final newline. 
- rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] - if nl != "\n" { - return "", "", s, ErrHeader - } - - // The first equals separates the key from the value. - eq := strings.IndexByte(rec, '=') - if eq == -1 { - return "", "", s, ErrHeader - } - k, v = rec[:eq], rec[eq+1:] - - if !validPAXRecord(k, v) { - return "", "", s, ErrHeader - } - return k, v, rem, nil -} - -// formatPAXRecord formats a single PAX record, prefixing it with the -// appropriate length. -func formatPAXRecord(k, v string) (string, error) { - if !validPAXRecord(k, v) { - return "", ErrHeader - } - - const padding = 3 // Extra padding for ' ', '=', and '\n' - size := len(k) + len(v) + padding - size += len(strconv.Itoa(size)) - record := strconv.Itoa(size) + " " + k + "=" + v + "\n" - - // Final adjustment if adding size field increased the record size. - if len(record) != size { - size = len(record) - record = strconv.Itoa(size) + " " + k + "=" + v + "\n" - } - return record, nil -} - -// validPAXRecord reports whether the key-value pair is valid where each -// record is formatted as: -// "%d %s=%s\n" % (size, key, value) -// -// Keys and values should be UTF-8, but the number of bad writers out there -// forces us to be a more liberal. -// Thus, we only reject all keys with NUL, and only reject NULs in values -// for the PAX version of the USTAR string fields. -// The key must not contain an '=' character. -func validPAXRecord(k, v string) bool { - if k == "" || strings.IndexByte(k, '=') >= 0 { - return false - } - switch k { - case paxPath, paxLinkpath, paxUname, paxGname: - return !hasNUL(v) - default: - return !hasNUL(k) - } -} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/writer.go b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go deleted file mode 100644 index e80498d03e..0000000000 --- a/vendor/github.com/vbatts/tar-split/archive/tar/writer.go +++ /dev/null @@ -1,653 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -import ( - "fmt" - "io" - "path" - "sort" - "strings" - "time" -) - -// Writer provides sequential writing of a tar archive. -// Write.WriteHeader begins a new file with the provided Header, -// and then Writer can be treated as an io.Writer to supply that file's data. -type Writer struct { - w io.Writer - pad int64 // Amount of padding to write after current file entry - curr fileWriter // Writer for current file entry - hdr Header // Shallow copy of Header that is safe for mutations - blk block // Buffer to use as temporary local storage - - // err is a persistent error. - // It is only the responsibility of every exported method of Writer to - // ensure that this error is sticky. - err error -} - -// NewWriter creates a new Writer writing to w. -func NewWriter(w io.Writer) *Writer { - return &Writer{w: w, curr: ®FileWriter{w, 0}} -} - -type fileWriter interface { - io.Writer - fileState - - ReadFrom(io.Reader) (int64, error) -} - -// Flush finishes writing the current file's block padding. -// The current file must be fully written before Flush can be called. -// -// This is unnecessary as the next call to WriteHeader or Close -// will implicitly flush out the file's padding. 
-func (tw *Writer) Flush() error { - if tw.err != nil { - return tw.err - } - if nb := tw.curr.LogicalRemaining(); nb > 0 { - return fmt.Errorf("archive/tar: missed writing %d bytes", nb) - } - if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil { - return tw.err - } - tw.pad = 0 - return nil -} - -// WriteHeader writes hdr and prepares to accept the file's contents. -// The Header.Size determines how many bytes can be written for the next file. -// If the current file is not fully written, then this returns an error. -// This implicitly flushes any padding necessary before writing the header. -func (tw *Writer) WriteHeader(hdr *Header) error { - if err := tw.Flush(); err != nil { - return err - } - tw.hdr = *hdr // Shallow copy of Header - - // Avoid usage of the legacy TypeRegA flag, and automatically promote - // it to use TypeReg or TypeDir. - if tw.hdr.Typeflag == TypeRegA { - if strings.HasSuffix(tw.hdr.Name, "/") { - tw.hdr.Typeflag = TypeDir - } else { - tw.hdr.Typeflag = TypeReg - } - } - - // Round ModTime and ignore AccessTime and ChangeTime unless - // the format is explicitly chosen. - // This ensures nominal usage of WriteHeader (without specifying the format) - // does not always result in the PAX format being chosen, which - // causes a 1KiB increase to every header. - if tw.hdr.Format == FormatUnknown { - tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second) - tw.hdr.AccessTime = time.Time{} - tw.hdr.ChangeTime = time.Time{} - } - - allowedFormats, paxHdrs, err := tw.hdr.allowedFormats() - switch { - case allowedFormats.has(FormatUSTAR): - tw.err = tw.writeUSTARHeader(&tw.hdr) - return tw.err - case allowedFormats.has(FormatPAX): - tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs) - return tw.err - case allowedFormats.has(FormatGNU): - tw.err = tw.writeGNUHeader(&tw.hdr) - return tw.err - default: - return err // Non-fatal error - } -} - -func (tw *Writer) writeUSTARHeader(hdr *Header) error { - // Check if we can use USTAR prefix/suffix splitting. - var namePrefix string - if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok { - namePrefix, hdr.Name = prefix, suffix - } - - // Pack the main header. - var f formatter - blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal) - f.formatString(blk.USTAR().Prefix(), namePrefix) - blk.SetFormat(FormatUSTAR) - if f.err != nil { - return f.err // Should never happen since header is validated - } - return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag) -} - -func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error { - realName, realSize := hdr.Name, hdr.Size - - // TODO(dsnet): Re-enable this when adding sparse support. - // See https://golang.org/issue/22735 - /* - // Handle sparse files. - var spd sparseDatas - var spb []byte - if len(hdr.SparseHoles) > 0 { - sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map - sph = alignSparseEntries(sph, hdr.Size) - spd = invertSparseEntries(sph, hdr.Size) - - // Format the sparse map. - hdr.Size = 0 // Replace with encoded size - spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n') - for _, s := range spd { - hdr.Size += s.Length - spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n') - spb = append(strconv.AppendInt(spb, s.Length, 10), '\n') - } - pad := blockPadding(int64(len(spb))) - spb = append(spb, zeroBlock[:pad]...) - hdr.Size += int64(len(spb)) // Accounts for encoded sparse map - - // Add and modify appropriate PAX records. 
- dir, file := path.Split(realName) - hdr.Name = path.Join(dir, "GNUSparseFile.0", file) - paxHdrs[paxGNUSparseMajor] = "1" - paxHdrs[paxGNUSparseMinor] = "0" - paxHdrs[paxGNUSparseName] = realName - paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10) - paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10) - delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName - } - */ - _ = realSize - - // Write PAX records to the output. - isGlobal := hdr.Typeflag == TypeXGlobalHeader - if len(paxHdrs) > 0 || isGlobal { - // Sort keys for deterministic ordering. - var keys []string - for k := range paxHdrs { - keys = append(keys, k) - } - sort.Strings(keys) - - // Write each record to a buffer. - var buf strings.Builder - for _, k := range keys { - rec, err := formatPAXRecord(k, paxHdrs[k]) - if err != nil { - return err - } - buf.WriteString(rec) - } - - // Write the extended header file. - var name string - var flag byte - if isGlobal { - name = realName - if name == "" { - name = "GlobalHead.0.0" - } - flag = TypeXGlobalHeader - } else { - dir, file := path.Split(realName) - name = path.Join(dir, "PaxHeaders.0", file) - flag = TypeXHeader - } - data := buf.String() - if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal { - return err // Global headers return here - } - } - - // Pack the main header. - var f formatter // Ignore errors since they are expected - fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) } - blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal) - blk.SetFormat(FormatPAX) - if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { - return err - } - - // TODO(dsnet): Re-enable this when adding sparse support. - // See https://golang.org/issue/22735 - /* - // Write the sparse map and setup the sparse writer if necessary. - if len(spd) > 0 { - // Use tw.curr since the sparse map is accounted for in hdr.Size. - if _, err := tw.curr.Write(spb); err != nil { - return err - } - tw.curr = &sparseFileWriter{tw.curr, spd, 0} - } - */ - return nil -} - -func (tw *Writer) writeGNUHeader(hdr *Header) error { - // Use long-link files if Name or Linkname exceeds the field size. - const longName = "././@LongLink" - if len(hdr.Name) > nameSize { - data := hdr.Name + "\x00" - if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil { - return err - } - } - if len(hdr.Linkname) > nameSize { - data := hdr.Linkname + "\x00" - if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil { - return err - } - } - - // Pack the main header. - var f formatter // Ignore errors since they are expected - var spd sparseDatas - var spb []byte - blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric) - if !hdr.AccessTime.IsZero() { - f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix()) - } - if !hdr.ChangeTime.IsZero() { - f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix()) - } - // TODO(dsnet): Re-enable this when adding sparse support. - // See https://golang.org/issue/22735 - /* - if hdr.Typeflag == TypeGNUSparse { - sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map - sph = alignSparseEntries(sph, hdr.Size) - spd = invertSparseEntries(sph, hdr.Size) - - // Format the sparse map. 
- formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas { - for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ { - f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset) - f.formatNumeric(sa.Entry(i).Length(), sp[0].Length) - sp = sp[1:] - } - if len(sp) > 0 { - sa.IsExtended()[0] = 1 - } - return sp - } - sp2 := formatSPD(spd, blk.GNU().Sparse()) - for len(sp2) > 0 { - var spHdr block - sp2 = formatSPD(sp2, spHdr.Sparse()) - spb = append(spb, spHdr[:]...) - } - - // Update size fields in the header block. - realSize := hdr.Size - hdr.Size = 0 // Encoded size; does not account for encoded sparse map - for _, s := range spd { - hdr.Size += s.Length - } - copy(blk.V7().Size(), zeroBlock[:]) // Reset field - f.formatNumeric(blk.V7().Size(), hdr.Size) - f.formatNumeric(blk.GNU().RealSize(), realSize) - } - */ - blk.SetFormat(FormatGNU) - if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { - return err - } - - // Write the extended sparse map and setup the sparse writer if necessary. - if len(spd) > 0 { - // Use tw.w since the sparse map is not accounted for in hdr.Size. - if _, err := tw.w.Write(spb); err != nil { - return err - } - tw.curr = &sparseFileWriter{tw.curr, spd, 0} - } - return nil -} - -type ( - stringFormatter func([]byte, string) - numberFormatter func([]byte, int64) -) - -// templateV7Plus fills out the V7 fields of a block using values from hdr. -// It also fills out fields (uname, gname, devmajor, devminor) that are -// shared in the USTAR, PAX, and GNU formats using the provided formatters. -// -// The block returned is only valid until the next call to -// templateV7Plus or writeRawFile. -func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block { - tw.blk.Reset() - - modTime := hdr.ModTime - if modTime.IsZero() { - modTime = time.Unix(0, 0) - } - - v7 := tw.blk.V7() - v7.TypeFlag()[0] = hdr.Typeflag - fmtStr(v7.Name(), hdr.Name) - fmtStr(v7.LinkName(), hdr.Linkname) - fmtNum(v7.Mode(), hdr.Mode) - fmtNum(v7.UID(), int64(hdr.Uid)) - fmtNum(v7.GID(), int64(hdr.Gid)) - fmtNum(v7.Size(), hdr.Size) - fmtNum(v7.ModTime(), modTime.Unix()) - - ustar := tw.blk.USTAR() - fmtStr(ustar.UserName(), hdr.Uname) - fmtStr(ustar.GroupName(), hdr.Gname) - fmtNum(ustar.DevMajor(), hdr.Devmajor) - fmtNum(ustar.DevMinor(), hdr.Devminor) - - return &tw.blk -} - -// writeRawFile writes a minimal file with the given name and flag type. -// It uses format to encode the header format and will write data as the body. -// It uses default values for all of the other fields (as BSD and GNU tar does). -func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error { - tw.blk.Reset() - - // Best effort for the filename. - name = toASCII(name) - if len(name) > nameSize { - name = name[:nameSize] - } - name = strings.TrimRight(name, "/") - - var f formatter - v7 := tw.blk.V7() - v7.TypeFlag()[0] = flag - f.formatString(v7.Name(), name) - f.formatOctal(v7.Mode(), 0) - f.formatOctal(v7.UID(), 0) - f.formatOctal(v7.GID(), 0) - f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB - f.formatOctal(v7.ModTime(), 0) - tw.blk.SetFormat(format) - if f.err != nil { - return f.err // Only occurs if size condition is violated - } - - // Write the header and data. - if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil { - return err - } - _, err := io.WriteString(tw, data) - return err -} - -// writeRawHeader writes the value of blk, regardless of its value. 
-// It sets up the Writer such that it can accept a file of the given size. -// If the flag is a special header-only flag, then the size is treated as zero. -func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error { - if err := tw.Flush(); err != nil { - return err - } - if _, err := tw.w.Write(blk[:]); err != nil { - return err - } - if isHeaderOnlyType(flag) { - size = 0 - } - tw.curr = ®FileWriter{tw.w, size} - tw.pad = blockPadding(size) - return nil -} - -// splitUSTARPath splits a path according to USTAR prefix and suffix rules. -// If the path is not splittable, then it will return ("", "", false). -func splitUSTARPath(name string) (prefix, suffix string, ok bool) { - length := len(name) - if length <= nameSize || !isASCII(name) { - return "", "", false - } else if length > prefixSize+1 { - length = prefixSize + 1 - } else if name[length-1] == '/' { - length-- - } - - i := strings.LastIndex(name[:length], "/") - nlen := len(name) - i - 1 // nlen is length of suffix - plen := i // plen is length of prefix - if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize { - return "", "", false - } - return name[:i], name[i+1:], true -} - -// Write writes to the current file in the tar archive. -// Write returns the error ErrWriteTooLong if more than -// Header.Size bytes are written after WriteHeader. -// -// Calling Write on special types like TypeLink, TypeSymlink, TypeChar, -// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless -// of what the Header.Size claims. -func (tw *Writer) Write(b []byte) (int, error) { - if tw.err != nil { - return 0, tw.err - } - n, err := tw.curr.Write(b) - if err != nil && err != ErrWriteTooLong { - tw.err = err - } - return n, err -} - -// readFrom populates the content of the current file by reading from r. -// The bytes read must match the number of remaining bytes in the current file. -// -// If the current file is sparse and r is an io.ReadSeeker, -// then readFrom uses Seek to skip past holes defined in Header.SparseHoles, -// assuming that skipped regions are all NULs. -// This always reads the last byte to ensure r is the right size. -// -// TODO(dsnet): Re-export this when adding sparse file support. -// See https://golang.org/issue/22735 -func (tw *Writer) readFrom(r io.Reader) (int64, error) { - if tw.err != nil { - return 0, tw.err - } - n, err := tw.curr.ReadFrom(r) - if err != nil && err != ErrWriteTooLong { - tw.err = err - } - return n, err -} - -// Close closes the tar archive by flushing the padding, and writing the footer. -// If the current file (from a prior call to WriteHeader) is not fully written, -// then this returns an error. -func (tw *Writer) Close() error { - if tw.err == ErrWriteAfterClose { - return nil - } - if tw.err != nil { - return tw.err - } - - // Trailer: two zero blocks. - err := tw.Flush() - for i := 0; i < 2 && err == nil; i++ { - _, err = tw.w.Write(zeroBlock[:]) - } - - // Ensure all future actions are invalid. - tw.err = ErrWriteAfterClose - return err // Report IO errors -} - -// regFileWriter is a fileWriter for writing data to a regular file entry. 
-type regFileWriter struct { - w io.Writer // Underlying Writer - nb int64 // Number of remaining bytes to write -} - -func (fw *regFileWriter) Write(b []byte) (n int, err error) { - overwrite := int64(len(b)) > fw.nb - if overwrite { - b = b[:fw.nb] - } - if len(b) > 0 { - n, err = fw.w.Write(b) - fw.nb -= int64(n) - } - switch { - case err != nil: - return n, err - case overwrite: - return n, ErrWriteTooLong - default: - return n, nil - } -} - -func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) { - return io.Copy(struct{ io.Writer }{fw}, r) -} - -func (fw regFileWriter) LogicalRemaining() int64 { - return fw.nb -} -func (fw regFileWriter) PhysicalRemaining() int64 { - return fw.nb -} - -// sparseFileWriter is a fileWriter for writing data to a sparse file entry. -type sparseFileWriter struct { - fw fileWriter // Underlying fileWriter - sp sparseDatas // Normalized list of data fragments - pos int64 // Current position in sparse file -} - -func (sw *sparseFileWriter) Write(b []byte) (n int, err error) { - overwrite := int64(len(b)) > sw.LogicalRemaining() - if overwrite { - b = b[:sw.LogicalRemaining()] - } - - b0 := b - endPos := sw.pos + int64(len(b)) - for endPos > sw.pos && err == nil { - var nf int // Bytes written in fragment - dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() - if sw.pos < dataStart { // In a hole fragment - bf := b[:min(int64(len(b)), dataStart-sw.pos)] - nf, err = zeroWriter{}.Write(bf) - } else { // In a data fragment - bf := b[:min(int64(len(b)), dataEnd-sw.pos)] - nf, err = sw.fw.Write(bf) - } - b = b[nf:] - sw.pos += int64(nf) - if sw.pos >= dataEnd && len(sw.sp) > 1 { - sw.sp = sw.sp[1:] // Ensure last fragment always remains - } - } - - n = len(b0) - len(b) - switch { - case err == ErrWriteTooLong: - return n, errMissData // Not possible; implies bug in validation logic - case err != nil: - return n, err - case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: - return n, errUnrefData // Not possible; implies bug in validation logic - case overwrite: - return n, ErrWriteTooLong - default: - return n, nil - } -} - -func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) { - rs, ok := r.(io.ReadSeeker) - if ok { - if _, err := rs.Seek(0, io.SeekCurrent); err != nil { - ok = false // Not all io.Seeker can really seek - } - } - if !ok { - return io.Copy(struct{ io.Writer }{sw}, r) - } - - var readLastByte bool - pos0 := sw.pos - for sw.LogicalRemaining() > 0 && !readLastByte && err == nil { - var nf int64 // Size of fragment - dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() - if sw.pos < dataStart { // In a hole fragment - nf = dataStart - sw.pos - if sw.PhysicalRemaining() == 0 { - readLastByte = true - nf-- - } - _, err = rs.Seek(nf, io.SeekCurrent) - } else { // In a data fragment - nf = dataEnd - sw.pos - nf, err = io.CopyN(sw.fw, rs, nf) - } - sw.pos += nf - if sw.pos >= dataEnd && len(sw.sp) > 1 { - sw.sp = sw.sp[1:] // Ensure last fragment always remains - } - } - - // If the last fragment is a hole, then seek to 1-byte before EOF, and - // read a single byte to ensure the file is the right size. 
- if readLastByte && err == nil { - _, err = mustReadFull(rs, []byte{0}) - sw.pos++ - } - - n = sw.pos - pos0 - switch { - case err == io.EOF: - return n, io.ErrUnexpectedEOF - case err == ErrWriteTooLong: - return n, errMissData // Not possible; implies bug in validation logic - case err != nil: - return n, err - case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: - return n, errUnrefData // Not possible; implies bug in validation logic - default: - return n, ensureEOF(rs) - } -} - -func (sw sparseFileWriter) LogicalRemaining() int64 { - return sw.sp[len(sw.sp)-1].endOffset() - sw.pos -} -func (sw sparseFileWriter) PhysicalRemaining() int64 { - return sw.fw.PhysicalRemaining() -} - -// zeroWriter may only be written with NULs, otherwise it returns errWriteHole. -type zeroWriter struct{} - -func (zeroWriter) Write(b []byte) (int, error) { - for i, c := range b { - if c != 0 { - return i, errWriteHole - } - } - return len(b), nil -} - -// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so. -func ensureEOF(r io.Reader) error { - n, err := tryReadFull(r, []byte{0}) - switch { - case n > 0: - return ErrWriteTooLong - case err == io.EOF: - return nil - default: - return err - } -} diff --git a/vendor/go.mongodb.org/mongo-driver/LICENSE b/vendor/go.mongodb.org/mongo-driver/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go deleted file mode 100644 index a0d8185826..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bson.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer -// See THIRD-PARTY-NOTICES for original license terms. - -package bson // import "go.mongodb.org/mongo-driver/bson" - -import ( - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// Zeroer allows custom struct types to implement a report of zero -// state. All struct types that don't implement Zeroer or where IsZero -// returns false are considered to be not zero. 
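Aside on the Zeroer hook whose doc comment closes above (the one-method interface itself follows just below): it is what lets non-pointer struct values participate in omitempty. A minimal sketch against the v1 bson API being vendored out here; Window and Config are invented example names:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// Window is "zero" when both bounds are zero. Implementing IsZero (the
// Zeroer interface) lets omitempty drop it even though it is a struct,
// not a pointer.
type Window struct {
	Min, Max int
}

func (w Window) IsZero() bool { return w.Min == 0 && w.Max == 0 }

type Config struct {
	Name   string `bson:"name"`
	Window Window `bson:"window,omitempty"`
}

func main() {
	out, err := bson.Marshal(Config{Name: "ko"})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(out)) // the zero-valued Window should be omitted
}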
-type Zeroer interface { - IsZero() bool -} - -// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters, -// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. -// -// A D should not be constructed with duplicate key names, as that can cause undefined server behavior. -// -// Example usage: -// -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} -type D = primitive.D - -// E represents a BSON element for a D. It is usually used inside a D. -type E = primitive.E - -// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not -// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be -// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. -// -// Example usage: -// -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} -type M = primitive.M - -// An A is an ordered representation of a BSON array. -// -// Example usage: -// -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} -type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go deleted file mode 100644 index 6ca8d9ad6c..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// ArrayCodec is the Codec used for bsoncore.Array values. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ArrayCodec registered. -type ArrayCodec struct{} - -var defaultArrayCodec = NewArrayCodec() - -// NewArrayCodec returns an ArrayCodec. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ArrayCodec registered. -func NewArrayCodec() *ArrayCodec { - return &ArrayCodec{} -} - -// EncodeValue is the ValueEncoder for bsoncore.Array values. -func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCoreArray { - return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} - } - - arr := val.Interface().(bsoncore.Array) - return bsonrw.Copier{}.CopyArrayFromBytes(vw, arr) -} - -// DecodeValue is the ValueDecoder for bsoncore.Array values. 
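Before the ArrayCodec DecodeValue body below, a quick illustration of the ordered/unordered distinction that the D and M doc comments above describe, sketched against the v1 API:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// bson.D preserves element order, which matters for command documents.
	ordered, err := bson.Marshal(bson.D{{Key: "ping", Value: 1}, {Key: "comment", Value: "hi"}})
	if err != nil {
		panic(err)
	}

	// bson.M is a plain map; the resulting element order is undefined.
	unordered, err := bson.Marshal(bson.M{"ping": 1, "comment": "hi"})
	if err != nil {
		panic(err)
	}

	fmt.Println(bson.Raw(ordered))   // always {"ping": 1, "comment": "hi"}
	fmt.Println(bson.Raw(unordered)) // either key may come first
}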
-func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCoreArray { - return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - arr, err := bsonrw.Copier{}.AppendArrayBytes(val.Interface().(bsoncore.Array), vr) - val.Set(reflect.ValueOf(arr)) - return err -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go deleted file mode 100644 index 0693bd432f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec" - -import ( - "fmt" - "reflect" - "strings" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var ( - emptyValue = reflect.Value{} -) - -// Marshaler is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead. -type Marshaler interface { - MarshalBSON() ([]byte, error) -} - -// ValueMarshaler is an interface implemented by types that can marshal -// themselves into a BSON value as bytes. The type must be the valid type for -// the bytes returned. The bytes and byte type together must be valid if the -// error is nil. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead. -type ValueMarshaler interface { - MarshalBSONValue() (bsontype.Type, []byte, error) -} - -// Unmarshaler is an interface implemented by types that can unmarshal a BSON -// document representation of themselves. The BSON bytes can be assumed to be -// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data -// after returning. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead. -type Unmarshaler interface { - UnmarshalBSON([]byte) error -} - -// ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead. -type ValueUnmarshaler interface { - UnmarshalBSONValue(bsontype.Type, []byte) error -} - -// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be -// encoded by the ValueEncoder. 
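The Marshaler/ValueMarshaler/Unmarshaler/ValueUnmarshaler hooks deleted above are the extension points user types implement; a hedged sketch of a round-tripping custom value follows, shown before the ValueEncoderError type below. CaseInsensitiveString is an invented example type, and bson.MarshalValue plus bson.RawValue.Unmarshal are the v1 helpers assumed here:

package main

import (
	"fmt"
	"strings"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

// CaseInsensitiveString stores as a lowercased BSON string.
type CaseInsensitiveString string

// MarshalBSONValue implements ValueMarshaler (signature as in the deleted interface).
func (s CaseInsensitiveString) MarshalBSONValue() (bsontype.Type, []byte, error) {
	return bson.MarshalValue(strings.ToLower(string(s)))
}

// UnmarshalBSONValue implements ValueUnmarshaler, copying out of the raw bytes.
func (s *CaseInsensitiveString) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
	var str string
	if err := (bson.RawValue{Type: t, Value: data}).Unmarshal(&str); err != nil {
		return err
	}
	*s = CaseInsensitiveString(str)
	return nil
}

func main() {
	doc, err := bson.Marshal(bson.D{{Key: "name", Value: CaseInsensitiveString("KO")}})
	if err != nil {
		panic(err)
	}
	var out struct{ Name CaseInsensitiveString }
	if err := bson.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // ko
}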
-type ValueEncoderError struct { - Name string - Types []reflect.Type - Kinds []reflect.Kind - Received reflect.Value -} - -func (vee ValueEncoderError) Error() string { - typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds)) - for _, t := range vee.Types { - typeKinds = append(typeKinds, t.String()) - } - for _, k := range vee.Kinds { - if k == reflect.Map { - typeKinds = append(typeKinds, "map[string]*") - continue - } - typeKinds = append(typeKinds, k.String()) - } - received := vee.Received.Kind().String() - if vee.Received.IsValid() { - received = vee.Received.Type().String() - } - return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received) -} - -// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be -// decoded by the ValueDecoder. -type ValueDecoderError struct { - Name string - Types []reflect.Type - Kinds []reflect.Kind - Received reflect.Value -} - -func (vde ValueDecoderError) Error() string { - typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds)) - for _, t := range vde.Types { - typeKinds = append(typeKinds, t.String()) - } - for _, k := range vde.Kinds { - if k == reflect.Map { - typeKinds = append(typeKinds, "map[string]*") - continue - } - typeKinds = append(typeKinds, k.String()) - } - received := vde.Received.Kind().String() - if vde.Received.IsValid() { - received = vde.Received.Type().String() - } - return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received) -} - -// EncodeContext is the contextual information required for a Codec to encode a -// value. -type EncodeContext struct { - *Registry - - // MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, - // uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) - // that can represent the integer value. - // - // Deprecated: Use bson.Encoder.IntMinSize instead. - MinSize bool - - errorOnInlineDuplicates bool - stringifyMapKeysWithFmt bool - nilMapAsEmpty bool - nilSliceAsEmpty bool - nilByteSliceAsEmpty bool - omitZeroStruct bool - useJSONStructTags bool -} - -// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in -// the marshaled BSON when the "inline" struct tag option is set. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. -func (ec *EncodeContext) ErrorOnInlineDuplicates() { - ec.errorOnInlineDuplicates = true -} - -// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name -// strings using fmt.Sprintf() instead of the default string conversion logic. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. -func (ec *EncodeContext) StringifyMapKeysWithFmt() { - ec.stringifyMapKeysWithFmt = true -} - -// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON -// null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. -func (ec *EncodeContext) NilMapAsEmpty() { - ec.nilMapAsEmpty = true -} - -// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON -// null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. 
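Each deprecated EncodeContext option above points at an equivalent bson.Encoder method; the NilSliceAsEmpty body this note interrupts resumes below. A sketch of the replacement path, assuming the v1 Encoder and bsonrw constructors current at this driver version:

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	enc.NilSliceAsEmpty() // Encoder-level replacement for EncodeContext.NilSliceAsEmpty

	var tags []string // nil slice: should encode as [] rather than null
	if err := enc.Encode(bson.D{{Key: "tags", Value: tags}}); err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(buf.Bytes()))
}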
-func (ec *EncodeContext) NilSliceAsEmpty() { - ec.nilSliceAsEmpty = true -} - -// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values -// instead of BSON null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. -func (ec *EncodeContext) NilByteSliceAsEmpty() { - ec.nilByteSliceAsEmpty = true -} - -// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) -// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. -// -// Note that the Encoder only examines exported struct fields when determining if a struct is the -// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. -func (ec *EncodeContext) OmitZeroStruct() { - ec.omitZeroStruct = true -} - -// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" -// struct tag is not specified. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead. -func (ec *EncodeContext) UseJSONStructTags() { - ec.useJSONStructTags = true -} - -// DecodeContext is the contextual information required for a Codec to decode a -// value. -type DecodeContext struct { - *Registry - - // Truncate, if true, instructs decoders to truncate the fractional part of BSON "double" - values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64, - uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to - BSON "decimal128" values. - // - // Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead. - Truncate bool - - // Ancestor is the type of a containing document. This is mainly used to determine what type - should be used when decoding an embedded document into an empty interface. For example, if - Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface - will be decoded into a bson.M. - // - // Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead. - Ancestor reflect.Type - - // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the - usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is - set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an - error. DocumentType overrides the Ancestor field. - defaultDocumentType reflect.Type - - binaryAsSlice bool - useJSONStructTags bool - useLocalTimeZone bool - zeroMaps bool - zeroStructs bool -} - -// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or -// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. -func (dc *DecodeContext) BinaryAsSlice() { - dc.binaryAsSlice = true -} - -// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" -// struct tag is not specified. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
-func (dc *DecodeContext) UseJSONStructTags() { - dc.useJSONStructTags = true -} - -// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead -// of the UTC timezone. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. -func (dc *DecodeContext) UseLocalTimeZone() { - dc.useLocalTimeZone = true -} - -// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value -// passed to Decode before unmarshaling BSON documents into them. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. -func (dc *DecodeContext) ZeroMaps() { - dc.zeroMaps = true -} - -// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination -// value passed to Decode before unmarshaling BSON documents into them. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. -func (dc *DecodeContext) ZeroStructs() { - dc.zeroStructs = true -} - -// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This -// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead. -func (dc *DecodeContext) DefaultDocumentM() { - dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) -} - -// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This -// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead. -func (dc *DecodeContext) DefaultDocumentD() { - dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) -} - -// ValueCodec is an interface for encoding and decoding a reflect.Value. -// -// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead. -type ValueCodec interface { - ValueEncoder - ValueDecoder -} - -// ValueEncoder is the interface implemented by types that can handle the encoding of a value. -type ValueEncoder interface { - EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error -} - -// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be -// used as a ValueEncoder. -type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error - -// EncodeValue implements the ValueEncoder interface. -func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - return fn(ec, vw, val) -} - -// ValueDecoder is the interface implemented by types that can handle the decoding of a value. -type ValueDecoder interface { - DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error -} - -// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be -// used as a ValueDecoder. -type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error - -// DecodeValue implements the ValueDecoder interface. -func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - return fn(dc, vr, val) -} - -// typeDecoder is the interface implemented by types that can handle the decoding of a value given its type.
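The ValueEncoderFunc/ValueDecoderFunc adapters above are how one-off codecs get registered without defining a struct; the typeDecoder interface continues below. A sketch registering a custom decoder (YesNo and decodeYesNo are invented names; bson.NewRegistry and bson.UnmarshalWithRegistry are the v1 entry points assumed here):

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

type YesNo bool

// decodeYesNo is a ValueDecoderFunc: it accepts the BSON strings
// "yes"/"no" in addition to ordinary BSON booleans.
func decodeYesNo(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	switch vr.Type() {
	case bsontype.String:
		s, err := vr.ReadString()
		if err != nil {
			return err
		}
		val.SetBool(s == "yes")
	case bsontype.Boolean:
		b, err := vr.ReadBoolean()
		if err != nil {
			return err
		}
		val.SetBool(b)
	default:
		return fmt.Errorf("cannot decode %v into a YesNo", vr.Type())
	}
	return nil
}

func main() {
	reg := bson.NewRegistry()
	reg.RegisterTypeDecoder(reflect.TypeOf(YesNo(false)), bsoncodec.ValueDecoderFunc(decodeYesNo))

	doc, err := bson.Marshal(bson.D{{Key: "enabled", Value: "yes"}})
	if err != nil {
		panic(err)
	}
	var out struct{ Enabled YesNo }
	if err := bson.UnmarshalWithRegistry(reg, doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Enabled) // true
}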
-type typeDecoder interface { - decodeType(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error) -} - -// typeDecoderFunc is an adapter function that allows a function with the correct signature to be used as a typeDecoder. -type typeDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error) - -func (fn typeDecoderFunc) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - return fn(dc, vr, t) -} - -// decodeAdapter allows two functions with the correct signatures to be used as both a ValueDecoder and typeDecoder. -type decodeAdapter struct { - ValueDecoderFunc - typeDecoderFunc -} - -var _ ValueDecoder = decodeAdapter{} -var _ typeDecoder = decodeAdapter{} - -// decodeTypeOrValue calls decoder.decodeType if decoder is a typeDecoder. Otherwise, it allocates a new element of type -// t and calls decoder.DecodeValue on it. -func decodeTypeOrValue(decoder ValueDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - td, _ := decoder.(typeDecoder) - return decodeTypeOrValueWithInfo(decoder, td, dc, vr, t, true) -} - -func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type, convert bool) (reflect.Value, error) { - if td != nil { - val, err := td.decodeType(dc, vr, t) - if err == nil && convert && val.Type() != t { - // This conversion step is necessary for slices and maps. If a user declares variables like: - // - // type myBool bool - // var m map[string]myBool - // - // and tries to decode BSON bytes into the map, the decoding will fail if this conversion is not present - // because we'll try to assign a value of type bool to one of type myBool. - val = val.Convert(t) - } - return val, err - } - - val := reflect.New(t).Elem() - err := vd.DecodeValue(dc, vr, val) - return val, err -} - -// CodecZeroer is the interface implemented by Codecs that can also determine if -// a value of the type that would be encoded is zero. -// -// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver -// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to -// nil instead. -type CodecZeroer interface { - IsTypeZero(interface{}) bool -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go deleted file mode 100644 index dde3e76815..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// ByteSliceCodec is the Codec used for []byte values. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ByteSliceCodec registered. -type ByteSliceCodec struct { - // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values - // instead of BSON null. - // - // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty instead.
- EncodeNilAsEmpty bool -} - -var ( - defaultByteSliceCodec = NewByteSliceCodec() - - // Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be - // used by collection type decoders (e.g. map, slice, etc) to set individual values in a - // collection. - _ typeDecoder = defaultByteSliceCodec -) - -// NewByteSliceCodec returns a ByteSliceCodec with options opts. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ByteSliceCodec registered. -func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { - byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) - codec := ByteSliceCodec{} - if byteSliceOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *byteSliceOpt.EncodeNilAsEmpty - } - return &codec -} - -// EncodeValue is the ValueEncoder for []byte. -func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tByteSlice { - return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty { - return vw.WriteNull() - } - return vw.WriteBinary(val.Interface().([]byte)) -} - -func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tByteSlice { - return emptyValue, ValueDecoderError{ - Name: "ByteSliceDecodeValue", - Types: []reflect.Type{tByteSlice}, - Received: reflect.Zero(t), - } - } - - var data []byte - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - data = []byte(str) - case bsontype.Symbol: - sym, err := vr.ReadSymbol() - if err != nil { - return emptyValue, err - } - data = []byte(sym) - case bsontype.Binary: - var subtype byte - data, subtype, err = vr.ReadBinary() - if err != nil { - return emptyValue, err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "[]byte"} - } - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a []byte", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(data), nil -} - -// DecodeValue is the ValueDecoder for []byte. -func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tByteSlice { - return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - - elem, err := bsc.decodeType(dc, vr, tByteSlice) - if err != nil { - return err - } - - val.Set(elem) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go deleted file mode 100644 index 844b50299f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - "sync" - "sync/atomic" -) - -// Runtime check that the kind encoder and decoder caches can store any valid -// reflect.Kind constant. -func init() { - if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" { - panic("The capacity of kindEncoderCache is too small.\n" + - "This is due to a new type being added to reflect.Kind.") - } -} - -// statically assert array size -var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer] -var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer] - -type typeEncoderCache struct { - cache sync.Map // map[reflect.Type]ValueEncoder -} - -func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) { - c.cache.Store(rt, enc) -} - -func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) { - if v, _ := c.cache.Load(rt); v != nil { - return v.(ValueEncoder), true - } - return nil, false -} - -func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder { - if v, loaded := c.cache.LoadOrStore(rt, enc); loaded { - enc = v.(ValueEncoder) - } - return enc -} - -func (c *typeEncoderCache) Clone() *typeEncoderCache { - cc := new(typeEncoderCache) - c.cache.Range(func(k, v interface{}) bool { - if k != nil && v != nil { - cc.cache.Store(k, v) - } - return true - }) - return cc -} - -type typeDecoderCache struct { - cache sync.Map // map[reflect.Type]ValueDecoder -} - -func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) { - c.cache.Store(rt, dec) -} - -func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) { - if v, _ := c.cache.Load(rt); v != nil { - return v.(ValueDecoder), true - } - return nil, false -} - -func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder { - if v, loaded := c.cache.LoadOrStore(rt, dec); loaded { - dec = v.(ValueDecoder) - } - return dec -} - -func (c *typeDecoderCache) Clone() *typeDecoderCache { - cc := new(typeDecoderCache) - c.cache.Range(func(k, v interface{}) bool { - if k != nil && v != nil { - cc.cache.Store(k, v) - } - return true - }) - return cc -} - -// atomic.Value requires that all calls to Store() have the same concrete type -// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type -// is always the same (since different concrete types may implement the -// ValueEncoder interface). 
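The comment above (and its twin for decoders) documents a real sync/atomic constraint: atomic.Value panics if successive Stores use different concrete types, which is exactly why every encoder is boxed in the single kindEncoderCacheEntry type declared below. A stdlib-only sketch of the failure mode; entry is an invented stand-in for the cache entry struct:

package main

import (
	"fmt"
	"sync/atomic"
)

// entry plays the role of kindEncoderCacheEntry: one fixed concrete type.
type entry struct{ v interface{} }

func main() {
	var av atomic.Value

	av.Store(&entry{v: "encoder A"})
	av.Store(&entry{v: 42}) // fine: the stored concrete type is always *entry
	fmt.Println(av.Load().(*entry).v)

	defer func() { fmt.Println("recovered:", recover()) }()
	av.Store(7) // panics: store of inconsistently typed value
}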
-type kindEncoderCacheEntry struct { - enc ValueEncoder -} - -type kindEncoderCache struct { - entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry -} - -func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) { - if enc != nil && rt < reflect.Kind(len(c.entries)) { - c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc}) - } -} - -func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) { - if rt < reflect.Kind(len(c.entries)) { - if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok { - return ent.enc, ent.enc != nil - } - } - return nil, false -} - -func (c *kindEncoderCache) Clone() *kindEncoderCache { - cc := new(kindEncoderCache) - for i, v := range c.entries { - if val := v.Load(); val != nil { - cc.entries[i].Store(val) - } - } - return cc -} - -// atomic.Value requires that all calls to Store() have the same concrete type -// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type -// is always the same (since different concrete types may implement the -// ValueDecoder interface). -type kindDecoderCacheEntry struct { - dec ValueDecoder -} - -type kindDecoderCache struct { - entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry -} - -func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) { - if rt < reflect.Kind(len(c.entries)) { - c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec}) - } -} - -func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) { - if rt < reflect.Kind(len(c.entries)) { - if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok { - return ent.dec, ent.dec != nil - } - } - return nil, false -} - -func (c *kindDecoderCache) Clone() *kindDecoderCache { - cc := new(kindDecoderCache) - for i, v := range c.entries { - if val := v.Load(); val != nil { - cc.entries[i].Store(val) - } - } - return cc -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go deleted file mode 100644 index cb8180f25c..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" -) - -// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder. -type condAddrEncoder struct { - canAddrEnc ValueEncoder - elseEnc ValueEncoder -} - -var _ ValueEncoder = (*condAddrEncoder)(nil) - -// newCondAddrEncoder returns a condAddrEncoder. -func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder { - encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} - return &encoder -} - -// EncodeValue is the ValueEncoderFunc for a value that may be addressable. -func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.CanAddr() { - return cae.canAddrEnc.EncodeValue(ec, vw, val) - } - if cae.elseEnc != nil { - return cae.elseEnc.EncodeValue(ec, vw, val) - } - return ErrNoEncoder{Type: val.Type()} -} - -// condAddrDecoder is the decoder used when a pointer to the value has a decoder.
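The cond-addr pair above and below switches on reflect addressability, which decides whether pointer-receiver hooks are reachable; the condAddrDecoder type follows this note. A stdlib-only sketch of the distinction:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	x := 7

	// A value copied into an interface is not addressable...
	direct := reflect.ValueOf(x)
	fmt.Println(direct.CanAddr()) // false: would take the elseEnc path

	// ...but the element reached through a pointer is, so pointer-receiver
	// methods (the canAddrEnc path) become callable on it.
	viaPtr := reflect.ValueOf(&x).Elem()
	fmt.Println(viaPtr.CanAddr()) // true
}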
-type condAddrDecoder struct { - canAddrDec ValueDecoder - elseDec ValueDecoder -} - -var _ ValueDecoder = (*condAddrDecoder)(nil) - -// newCondAddrDecoder returns a condAddrDecoder. -func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder { - decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec} - return &decoder -} - -// DecodeValue is the ValueDecoderFunc for a value that may be addressable. -func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if val.CanAddr() { - return cad.canAddrDec.DecodeValue(dc, vr, val) - } - if cad.elseDec != nil { - return cad.elseDec.DecodeValue(dc, vr, val) - } - return ErrNoDecoder{Type: val.Type()} -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go deleted file mode 100644 index 2ce119731b..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ /dev/null @@ -1,1807 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "net/url" - "reflect" - "strconv" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var ( - defaultValueDecoders DefaultValueDecoders - errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled") -) - -type decodeBinaryError struct { - subtype byte - typeName string -} - -func (d decodeBinaryError) Error() string { - return fmt.Sprintf("only binary values with subtype 0x00 or 0x02 can be decoded into %s, but got subtype %v", d.typeName, d.subtype) -} - -func newDefaultStructCodec() *StructCodec { - codec, err := NewStructCodec(DefaultStructTagParser) - if err != nil { - // This function is called from the codec registration path, so errors can't be propagated. If there's an error - constructing the StructCodec, we panic to avoid losing it. - panic(fmt.Errorf("error creating default StructCodec: %v", err)) - } - return codec -} - -// DefaultValueDecoders is a namespace type for the default ValueDecoders used -// when creating a registry. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -type DefaultValueDecoders struct{} - -// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with -// the provided RegistryBuilder. -// -// There is no support for decoding map[string]interface{} because there is no decoder for -// interface{}, so users must either register this decoder themselves or use the -// EmptyInterfaceDecoder available in the bson package. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered.
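errCannotTruncate above surfaces whenever a BSON double with a fractional part meets a Go integer field, and the deprecation notes in this file point at Decoder.AllowTruncatingDoubles as the modern switch. A sketch of both behaviors, assuming the v1 Decoder API (RegisterDefaultDecoders itself continues below):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	doc, err := bson.Marshal(bson.D{{Key: "n", Value: 3.99}})
	if err != nil {
		panic(err)
	}

	var strict struct{ N int }
	fmt.Println(bson.Unmarshal(doc, &strict)) // should report the errCannotTruncate message above

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(doc))
	if err != nil {
		panic(err)
	}
	dec.AllowTruncatingDoubles() // Decoder-level replacement for DecodeContext.Truncate
	var loose struct{ N int }
	if err := dec.Decode(&loose); err != nil {
		panic(err)
	}
	fmt.Println(loose.N) // 3: fractional part dropped
}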
-func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) - } - - intDecoder := decodeAdapter{dvd.IntDecodeValue, dvd.intDecodeType} - floatDecoder := decodeAdapter{dvd.FloatDecodeValue, dvd.floatDecodeType} - - rb. - RegisterTypeDecoder(tD, ValueDecoderFunc(dvd.DDecodeValue)). - RegisterTypeDecoder(tBinary, decodeAdapter{dvd.BinaryDecodeValue, dvd.binaryDecodeType}). - RegisterTypeDecoder(tUndefined, decodeAdapter{dvd.UndefinedDecodeValue, dvd.undefinedDecodeType}). - RegisterTypeDecoder(tDateTime, decodeAdapter{dvd.DateTimeDecodeValue, dvd.dateTimeDecodeType}). - RegisterTypeDecoder(tNull, decodeAdapter{dvd.NullDecodeValue, dvd.nullDecodeType}). - RegisterTypeDecoder(tRegex, decodeAdapter{dvd.RegexDecodeValue, dvd.regexDecodeType}). - RegisterTypeDecoder(tDBPointer, decodeAdapter{dvd.DBPointerDecodeValue, dvd.dBPointerDecodeType}). - RegisterTypeDecoder(tTimestamp, decodeAdapter{dvd.TimestampDecodeValue, dvd.timestampDecodeType}). - RegisterTypeDecoder(tMinKey, decodeAdapter{dvd.MinKeyDecodeValue, dvd.minKeyDecodeType}). - RegisterTypeDecoder(tMaxKey, decodeAdapter{dvd.MaxKeyDecodeValue, dvd.maxKeyDecodeType}). - RegisterTypeDecoder(tJavaScript, decodeAdapter{dvd.JavaScriptDecodeValue, dvd.javaScriptDecodeType}). - RegisterTypeDecoder(tSymbol, decodeAdapter{dvd.SymbolDecodeValue, dvd.symbolDecodeType}). - RegisterTypeDecoder(tByteSlice, defaultByteSliceCodec). - RegisterTypeDecoder(tTime, defaultTimeCodec). - RegisterTypeDecoder(tEmpty, defaultEmptyInterfaceCodec). - RegisterTypeDecoder(tCoreArray, defaultArrayCodec). - RegisterTypeDecoder(tOID, decodeAdapter{dvd.ObjectIDDecodeValue, dvd.objectIDDecodeType}). - RegisterTypeDecoder(tDecimal, decodeAdapter{dvd.Decimal128DecodeValue, dvd.decimal128DecodeType}). - RegisterTypeDecoder(tJSONNumber, decodeAdapter{dvd.JSONNumberDecodeValue, dvd.jsonNumberDecodeType}). - RegisterTypeDecoder(tURL, decodeAdapter{dvd.URLDecodeValue, dvd.urlDecodeType}). - RegisterTypeDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)). - RegisterTypeDecoder(tCodeWithScope, decodeAdapter{dvd.CodeWithScopeDecodeValue, dvd.codeWithScopeDecodeType}). - RegisterDefaultDecoder(reflect.Bool, decodeAdapter{dvd.BooleanDecodeValue, dvd.booleanDecodeType}). - RegisterDefaultDecoder(reflect.Int, intDecoder). - RegisterDefaultDecoder(reflect.Int8, intDecoder). - RegisterDefaultDecoder(reflect.Int16, intDecoder). - RegisterDefaultDecoder(reflect.Int32, intDecoder). - RegisterDefaultDecoder(reflect.Int64, intDecoder). - RegisterDefaultDecoder(reflect.Uint, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint8, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint16, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint32, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint64, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Float32, floatDecoder). - RegisterDefaultDecoder(reflect.Float64, floatDecoder). - RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)). - RegisterDefaultDecoder(reflect.Map, defaultMapCodec). - RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec). - RegisterDefaultDecoder(reflect.String, defaultStringCodec). - RegisterDefaultDecoder(reflect.Struct, newDefaultStructCodec()). - RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()). - RegisterTypeMapEntry(bsontype.Double, tFloat64). - RegisterTypeMapEntry(bsontype.String, tString). - RegisterTypeMapEntry(bsontype.Array, tA). 
- RegisterTypeMapEntry(bsontype.Binary, tBinary). - RegisterTypeMapEntry(bsontype.Undefined, tUndefined). - RegisterTypeMapEntry(bsontype.ObjectID, tOID). - RegisterTypeMapEntry(bsontype.Boolean, tBool). - RegisterTypeMapEntry(bsontype.DateTime, tDateTime). - RegisterTypeMapEntry(bsontype.Regex, tRegex). - RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer). - RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript). - RegisterTypeMapEntry(bsontype.Symbol, tSymbol). - RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope). - RegisterTypeMapEntry(bsontype.Int32, tInt32). - RegisterTypeMapEntry(bsontype.Int64, tInt64). - RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp). - RegisterTypeMapEntry(bsontype.Decimal128, tDecimal). - RegisterTypeMapEntry(bsontype.MinKey, tMinKey). - RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey). - RegisterTypeMapEntry(bsontype.Type(0), tD). - RegisterTypeMapEntry(bsontype.EmbeddedDocument, tD). - RegisterHookDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)). - RegisterHookDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)) -} - -// DDecodeValue is the ValueDecoderFunc for primitive.D instances. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || !val.CanSet() || val.Type() != tD { - return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - dc.Ancestor = tD - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return fmt.Errorf("cannot decode %v into a primitive.D", vrType) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - decoder, err := dc.LookupDecoder(tEmpty) - if err != nil { - return err - } - tEmptyTypeDecoder, _ := decoder.(typeDecoder) - - // Use the elements in the provided value if it's non nil. Otherwise, allocate a new D instance. - var elems primitive.D - if !val.IsNil() { - val.SetLen(0) - elems = val.Interface().(primitive.D) - } else { - elems = make(primitive.D, 0) - } - - for { - key, elemVr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } else if err != nil { - return err - } - - // Pass false for convert because we don't need to call reflect.Value.Convert for tEmpty. 
- elem, err := decodeTypeOrValueWithInfo(decoder, tEmptyTypeDecoder, dc, elemVr, tEmpty, false) - if err != nil { - return err - } - - elems = append(elems, primitive.E{Key: key, Value: elem.Interface()}) - } - - val.Set(reflect.ValueOf(elems)) - return nil -} - -func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t.Kind() != reflect.Bool { - return emptyValue, ValueDecoderError{ - Name: "BooleanDecodeValue", - Kinds: []reflect.Kind{reflect.Bool}, - Received: reflect.Zero(t), - } - } - - var b bool - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - b = (i32 != 0) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - b = (i64 != 0) - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - b = (f64 != 0) - case bsontype.Boolean: - b, err = vr.ReadBoolean() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a boolean", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(b), nil -} - -// BooleanDecodeValue is the ValueDecoderFunc for bool types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { - return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} - } - - elem, err := dvd.booleanDecodeType(dctx, vr, val.Type()) - if err != nil { - return err - } - - val.SetBool(elem.Bool()) - return nil -} - -func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var i64 int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return emptyValue, err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return emptyValue, errCannotTruncate - } - if f64 > float64(math.MaxInt64) { - return emptyValue, fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - i64 = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) - } - - switch t.Kind() { - case reflect.Int8: - if i64 < math.MinInt8 || i64 > math.MaxInt8 { - return emptyValue, fmt.Errorf("%d overflows int8", i64) - } - - return reflect.ValueOf(int8(i64)), nil - case reflect.Int16: - if i64 < math.MinInt16 || i64 > math.MaxInt16 { - return emptyValue, fmt.Errorf("%d overflows int16", i64) - } - - return reflect.ValueOf(int16(i64)), nil - case reflect.Int32: - if i64 < math.MinInt32 || i64 > math.MaxInt32 { - return 
emptyValue, fmt.Errorf("%d overflows int32", i64) - } - - return reflect.ValueOf(int32(i64)), nil - case reflect.Int64: - return reflect.ValueOf(i64), nil - case reflect.Int: - if int64(int(i64)) != i64 { // Can we fit this inside of an int - return emptyValue, fmt.Errorf("%d overflows int", i64) - } - - return reflect.ValueOf(int(i64)), nil - default: - return emptyValue, ValueDecoderError{ - Name: "IntDecodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: reflect.Zero(t), - } - } -} - -// IntDecodeValue is the ValueDecoderFunc for int types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "IntDecodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: val, - } - } - - elem, err := dvd.intDecodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.SetInt(elem.Int()) - return nil -} - -// UintDecodeValue is the ValueDecoderFunc for uint types. -// -// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - var i64 int64 - var err error - switch vr.Type() { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled") - } - if f64 > float64(math.MaxInt64) { - return fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return err - } - if b { - i64 = 1 - } - default: - return fmt.Errorf("cannot decode %v into an integer type", vr.Type()) - } - - if !val.CanSet() { - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - switch val.Kind() { - case reflect.Uint8: - if i64 < 0 || i64 > math.MaxUint8 { - return fmt.Errorf("%d overflows uint8", i64) - } - case reflect.Uint16: - if i64 < 0 || i64 > math.MaxUint16 { - return fmt.Errorf("%d overflows uint16", i64) - } - case reflect.Uint32: - if i64 < 0 || i64 > math.MaxUint32 { - return fmt.Errorf("%d overflows uint32", i64) - } - case reflect.Uint64: - if i64 < 0 { - return fmt.Errorf("%d overflows uint64", i64) - } - case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint - return fmt.Errorf("%d overflows uint", i64) - } - default: - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - val.SetUint(uint64(i64)) - return nil -} - -func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var f float64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: 
- i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - f = float64(i32) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - f = float64(i64) - case bsontype.Double: - f, err = vr.ReadDouble() - if err != nil { - return emptyValue, err - } - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - f = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType) - } - - switch t.Kind() { - case reflect.Float32: - if !dc.Truncate && float64(float32(f)) != f { - return emptyValue, errCannotTruncate - } - - return reflect.ValueOf(float32(f)), nil - case reflect.Float64: - return reflect.ValueOf(f), nil - default: - return emptyValue, ValueDecoderError{ - Name: "FloatDecodeValue", - Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, - Received: reflect.Zero(t), - } - } -} - -// FloatDecodeValue is the ValueDecoderFunc for float types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "FloatDecodeValue", - Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, - Received: val, - } - } - - elem, err := dvd.floatDecodeType(ec, vr, val.Type()) - if err != nil { - return err - } - - val.SetFloat(elem.Float()) - return nil -} - -// StringDecodeValue is the ValueDecoderFunc for string types. -// -// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - var str string - var err error - switch vr.Type() { - // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed. - case bsontype.String: - str, err = vr.ReadString() - if err != nil { - return err - } - default: - return fmt.Errorf("cannot decode %v into a string type", vr.Type()) - } - if !val.CanSet() || val.Kind() != reflect.String { - return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} - } - - val.SetString(str) - return nil -} - -func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tJavaScript { - return emptyValue, ValueDecoderError{ - Name: "JavaScriptDecodeValue", - Types: []reflect.Type{tJavaScript}, - Received: reflect.Zero(t), - } - } - - var js string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.JavaScript: - js, err = vr.ReadJavascript() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.JavaScript(js)), nil -} - -// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
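Aside: the string and JavaScript decoders above are reachable through the driver's public API. A minimal, self-contained sketch (document shape and field names are ours, not from this patch):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	// Encode a document containing a BSON JavaScript value.
	data, err := bson.Marshal(bson.D{{Key: "fn", Value: primitive.JavaScript("function() { return 1 }")}})
	if err != nil {
		panic(err)
	}
	// The JavaScript decoder handles the typed field on the way back in.
	var out struct {
		Fn primitive.JavaScript `bson:"fn"`
	}
	if err := bson.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Fn)
}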
-func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tJavaScript { - return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} - } - - elem, err := dvd.javaScriptDecodeType(dctx, vr, tJavaScript) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} - -func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tSymbol { - return emptyValue, ValueDecoderError{ - Name: "SymbolDecodeValue", - Types: []reflect.Type{tSymbol}, - Received: reflect.Zero(t), - } - } - - var symbol string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - symbol, err = vr.ReadString() - case bsontype.Symbol: - symbol, err = vr.ReadSymbol() - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return emptyValue, err - } - - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "primitive.Symbol"} - } - symbol = string(data) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Symbol(symbol)), nil -} - -// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tSymbol { - return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} - } - - elem, err := dvd.symbolDecodeType(dctx, vr, tSymbol) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} - -func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tBinary { - return emptyValue, ValueDecoderError{ - Name: "BinaryDecodeValue", - Types: []reflect.Type{tBinary}, - Received: reflect.Zero(t), - } - } - - var data []byte - var subtype byte - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Binary: - data, subtype, err = vr.ReadBinary() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Binary", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}), nil -} - -// BinaryDecodeValue is the ValueDecoderFunc for Binary. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
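For context on the binary decoder above, a round-trip through the public API; the subtype and payload here are arbitrary example values:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	in := bson.D{{Key: "raw", Value: primitive.Binary{Subtype: 0x00, Data: []byte{0xDE, 0xAD}}}}
	data, err := bson.Marshal(in)
	if err != nil {
		panic(err)
	}
	var out struct {
		Raw primitive.Binary `bson:"raw"`
	}
	if err := bson.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Printf("subtype=%#x data=%x\n", out.Raw.Subtype, out.Raw.Data)
}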
-func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tBinary { - return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} - } - - elem, err := dvd.binaryDecodeType(dc, vr, tBinary) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tUndefined { - return emptyValue, ValueDecoderError{ - Name: "UndefinedDecodeValue", - Types: []reflect.Type{tUndefined}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Undefined: - err = vr.ReadUndefined() - case bsontype.Null: - err = vr.ReadNull() - default: - return emptyValue, fmt.Errorf("cannot decode %v into an Undefined", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Undefined{}), nil -} - -// UndefinedDecodeValue is the ValueDecoderFunc for Undefined. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tUndefined { - return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} - } - - elem, err := dvd.undefinedDecodeType(dc, vr, tUndefined) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// Accept both 12-byte string and pretty-printed 24-byte hex string formats. -func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tOID { - return emptyValue, ValueDecoderError{ - Name: "ObjectIDDecodeValue", - Types: []reflect.Type{tOID}, - Received: reflect.Zero(t), - } - } - - var oid primitive.ObjectID - var err error - switch vrType := vr.Type(); vrType { - case bsontype.ObjectID: - oid, err = vr.ReadObjectID() - if err != nil { - return emptyValue, err - } - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - if oid, err = primitive.ObjectIDFromHex(str); err == nil { - break - } - if len(str) != 12 { - return emptyValue, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str)) - } - byteArr := []byte(str) - copy(oid[:], byteArr) - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an ObjectID", vrType) - } - - return reflect.ValueOf(oid), nil -} - -// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
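The ObjectID decoder above accepts both the pretty-printed 24-character hex form and a raw 12-byte string. A standalone sketch of that same fallback (the helper name `oidFromString` is ours):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

// oidFromString mirrors the decoder's fallback: try the 24-character hex
// encoding first, then accept a raw 12-byte string.
func oidFromString(s string) (primitive.ObjectID, error) {
	if oid, err := primitive.ObjectIDFromHex(s); err == nil {
		return oid, nil
	}
	if len(s) != 12 {
		return primitive.NilObjectID, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %d)", len(s))
	}
	var oid primitive.ObjectID
	copy(oid[:], s)
	return oid, nil
}

func main() {
	oid, err := oidFromString("5f4d2d3e9a1b2c3d4e5f6a7b")
	fmt.Println(oid, err)
}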
-func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tOID { - return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} - } - - elem, err := dvd.objectIDDecodeType(dc, vr, tOID) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDateTime { - return emptyValue, ValueDecoderError{ - Name: "DateTimeDecodeValue", - Types: []reflect.Type{tDateTime}, - Received: reflect.Zero(t), - } - } - - var dt int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.DateTime: - dt, err = vr.ReadDateTime() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a DateTime", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.DateTime(dt)), nil -} - -// DateTimeDecodeValue is the ValueDecoderFunc for DateTime. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDateTime { - return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} - } - - elem, err := dvd.dateTimeDecodeType(dc, vr, tDateTime) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tNull { - return emptyValue, ValueDecoderError{ - Name: "NullDecodeValue", - Types: []reflect.Type{tNull}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Undefined: - err = vr.ReadUndefined() - case bsontype.Null: - err = vr.ReadNull() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Null", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Null{}), nil -} - -// NullDecodeValue is the ValueDecoderFunc for Null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
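As the DateTime decoder above suggests, primitive.DateTime is a count of milliseconds since the Unix epoch. A short sketch of converting to and from time.Time (the timestamp is an arbitrary example):

package main

import (
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	// primitive.DateTime stores milliseconds since the Unix epoch.
	now := time.Date(2024, 5, 14, 12, 0, 0, 0, time.UTC)
	dt := primitive.NewDateTimeFromTime(now)
	fmt.Println(int64(dt), dt.Time().UTC())
}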
-func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tNull { - return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} - } - - elem, err := dvd.nullDecodeType(dc, vr, tNull) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tRegex { - return emptyValue, ValueDecoderError{ - Name: "RegexDecodeValue", - Types: []reflect.Type{tRegex}, - Received: reflect.Zero(t), - } - } - - var pattern, options string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Regex: - pattern, options, err = vr.ReadRegex() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Regex", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}), nil -} - -// RegexDecodeValue is the ValueDecoderFunc for Regex. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tRegex { - return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} - } - - elem, err := dvd.regexDecodeType(dc, vr, tRegex) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDBPointer { - return emptyValue, ValueDecoderError{ - Name: "DBPointerDecodeValue", - Types: []reflect.Type{tDBPointer}, - Received: reflect.Zero(t), - } - } - - var ns string - var pointer primitive.ObjectID - var err error - switch vrType := vr.Type(); vrType { - case bsontype.DBPointer: - ns, pointer, err = vr.ReadDBPointer() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a DBPointer", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}), nil -} - -// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
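A quick illustration of the Regex decoder above via the public API (pattern and options are made-up examples):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	data, err := bson.Marshal(bson.D{{Key: "re", Value: primitive.Regex{Pattern: "^ko-", Options: "i"}}})
	if err != nil {
		panic(err)
	}
	var out struct {
		Re primitive.Regex `bson:"re"`
	}
	if err := bson.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Re.Pattern, out.Re.Options)
}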
-func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDBPointer { - return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} - } - - elem, err := dvd.dBPointerDecodeType(dc, vr, tDBPointer) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { - if reflectType != tTimestamp { - return emptyValue, ValueDecoderError{ - Name: "TimestampDecodeValue", - Types: []reflect.Type{tTimestamp}, - Received: reflect.Zero(reflectType), - } - } - - var t, incr uint32 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Timestamp: - t, incr, err = vr.ReadTimestamp() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Timestamp", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Timestamp{T: t, I: incr}), nil -} - -// TimestampDecodeValue is the ValueDecoderFunc for Timestamp. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tTimestamp { - return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} - } - - elem, err := dvd.timestampDecodeType(dc, vr, tTimestamp) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tMinKey { - return emptyValue, ValueDecoderError{ - Name: "MinKeyDecodeValue", - Types: []reflect.Type{tMinKey}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.MinKey: - err = vr.ReadMinKey() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a MinKey", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.MinKey{}), nil -} - -// MinKeyDecodeValue is the ValueDecoderFunc for MinKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
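The Timestamp decoder above maps BSON timestamps onto primitive.Timestamp's T (seconds) and I (ordinal) fields. A minimal round-trip sketch with example values:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	data, err := bson.Marshal(bson.D{{Key: "ts", Value: primitive.Timestamp{T: 1715700000, I: 1}}})
	if err != nil {
		panic(err)
	}
	var out struct {
		TS primitive.Timestamp `bson:"ts"`
	}
	if err := bson.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.TS.T, out.TS.I)
}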
-func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tMinKey { - return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} - } - - elem, err := dvd.minKeyDecodeType(dc, vr, tMinKey) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tMaxKey { - return emptyValue, ValueDecoderError{ - Name: "MaxKeyDecodeValue", - Types: []reflect.Type{tMaxKey}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.MaxKey: - err = vr.ReadMaxKey() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a MaxKey", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.MaxKey{}), nil -} - -// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tMaxKey { - return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} - } - - elem, err := dvd.maxKeyDecodeType(dc, vr, tMaxKey) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDecimal { - return emptyValue, ValueDecoderError{ - Name: "Decimal128DecodeValue", - Types: []reflect.Type{tDecimal}, - Received: reflect.Zero(t), - } - } - - var d128 primitive.Decimal128 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Decimal128: - d128, err = vr.ReadDecimal128() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(d128), nil -} - -// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
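For the Decimal128 decoder above, the usual entry point is primitive.ParseDecimal128; a short sketch with an arbitrary value:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	d, err := primitive.ParseDecimal128("1.5")
	if err != nil {
		panic(err)
	}
	data, err := bson.Marshal(bson.D{{Key: "price", Value: d}})
	if err != nil {
		panic(err)
	}
	var out struct {
		Price primitive.Decimal128 `bson:"price"`
	}
	if err := bson.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Price.String())
}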
-func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDecimal { - return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} - } - - elem, err := dvd.decimal128DecodeType(dctx, vr, tDecimal) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tJSONNumber { - return emptyValue, ValueDecoderError{ - Name: "JSONNumberDecodeValue", - Types: []reflect.Type{tJSONNumber}, - Received: reflect.Zero(t), - } - } - - var jsonNum json.Number - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatFloat(f64, 'f', -1, 64)) - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatInt(int64(i32), 10)) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatInt(i64, 10)) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a json.Number", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(jsonNum), nil -} - -// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tJSONNumber { - return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} - } - - elem, err := dvd.jsonNumberDecodeType(dc, vr, tJSONNumber) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tURL { - return emptyValue, ValueDecoderError{ - Name: "URLDecodeValue", - Types: []reflect.Type{tURL}, - Received: reflect.Zero(t), - } - } - - urlPtr := &url.URL{} - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - var str string // Declare str here to avoid shadowing err during the ReadString call. - str, err = vr.ReadString() - if err != nil { - return emptyValue, err - } - - urlPtr, err = url.Parse(str) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a *url.URL", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(urlPtr).Elem(), nil -} - -// URLDecodeValue is the ValueDecoderFunc for url.URL. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
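The json.Number decoder above formats doubles with strconv.FormatFloat(f64, 'f', -1, 64), i.e. the shortest decimal string that round-trips to the same float64. A standalone sketch of that formatting choice:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	// Precision -1 yields the shortest decimal that round-trips,
	// e.g. "0.1" rather than a long binary-fraction expansion.
	f := 0.1
	fmt.Println(json.Number(strconv.FormatFloat(f, 'f', -1, 64)))

	// Integral BSON values become plain base-10 strings.
	fmt.Println(json.Number(strconv.FormatInt(int64(42), 10)))
}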
-func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tURL { - return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} - } - - elem, err := dvd.urlDecodeType(dc, vr, tURL) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// TimeDecodeValue is the ValueDecoderFunc for time.Time. -// -// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if vr.Type() != bsontype.DateTime { - return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) - } - - dt, err := vr.ReadDateTime() - if err != nil { - return err - } - - if !val.CanSet() || val.Type() != tTime { - return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} - } - - val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC())) - return nil -} - -// ByteSliceDecodeValue is the ValueDecoderFunc for []byte. -// -// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { - return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) - } - - if !val.CanSet() || val.Type() != tByteSlice { - return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - - if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - } - - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != 0x00 { - return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype) - } - - val.Set(reflect.ValueOf(data)) - return nil -} - -// MapDecodeValue is the ValueDecoderFunc for map[string]* types. -// -// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { - return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - switch vr.Type() { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeMap(val.Type())) - } - - eType := val.Type().Elem() - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return err - } - - if eType == tEmpty { - dc.Ancestor = val.Type() - } - - keyType := val.Type().Key() - for { - key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return err - } - - elem := reflect.New(eType).Elem() - - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - - val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem) - } - return nil -} - -// ArrayDecodeValue is the ValueDecoderFunc for array types. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Array { - return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Array: - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - case bsontype.Binary: - if val.Type().Elem() != tByte { - return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType) - } - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) - } - - if len(data) > val.Len() { - return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type()) - } - - for idx, elem := range data { - val.Index(idx).Set(reflect.ValueOf(elem)) - } - return nil - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - default: - return fmt.Errorf("cannot decode %v into an array", vrType) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - elemsFunc = dvd.decodeD - default: - elemsFunc = dvd.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if len(elems) > val.Len() { - return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems)) - } - - for idx, elem := range elems { - val.Index(idx).Set(elem) - } - - return nil -} - -// SliceDecodeValue is the ValueDecoderFunc for slice types. -// -// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Slice { - return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vr.Type() { - case bsontype.Array: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - default: - return fmt.Errorf("cannot decode %v into a slice", vr.Type()) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - dc.Ancestor = val.Type() - elemsFunc = dvd.decodeD - default: - elemsFunc = dvd.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) - } - - val.SetLen(0) - val.Set(reflect.Append(val, elems...)) - - return nil -} - -// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - - if val.Kind() == reflect.Ptr && val.IsNil() { - if !val.CanSet() { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - val.Set(reflect.New(val.Type().Elem())) - } - - if !val.Type().Implements(tValueUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. - } - - t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err - } - - m, ok := val.Interface().(ValueUnmarshaler) - if !ok { - // NB: this error should be unreachable due to the above checks - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - return m.UnmarshalBSONValue(t, src) -} - -// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - - if val.Kind() == reflect.Ptr && val.IsNil() { - if !val.CanSet() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val.Set(reflect.New(val.Type().Elem())) - } - - _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err - } - - // If the target Go value is a pointer and the BSON field value is empty, set the value to the - // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to - // change the pointer value from within the function (only the value at the pointer address), - // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON - // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches - // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and - // the JSON field value is "null". - if val.Kind() == reflect.Ptr && len(src) == 0 { - val.Set(reflect.Zero(val.Type())) - return nil - } - - if !val.Type().Implements(tUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. 
- } - - m, ok := val.Interface().(Unmarshaler) - if !ok { - // NB: this error should be unreachable due to the above checks - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - return m.UnmarshalBSON(src) -} - -// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. -// -// Deprecated: EmptyInterfaceDecodeValue is not registered by default. Use EmptyInterfaceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tEmpty { - return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - rtype, err := dc.LookupTypeMapEntry(vr.Type()) - if err != nil { - switch vr.Type() { - case bsontype.EmbeddedDocument: - if dc.Ancestor != nil { - rtype = dc.Ancestor - break - } - rtype = tD - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return err - } - } - - decoder, err := dc.LookupDecoder(rtype) - if err != nil { - return err - } - - elem := reflect.New(rtype).Elem() - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCoreDocument { - return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - - cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr) - val.Set(reflect.ValueOf(cdoc)) - return err -} - -func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) { - elems := make([]reflect.Value, 0) - - ar, err := vr.ReadArray() - if err != nil { - return nil, err - } - - eType := val.Type().Elem() - - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return nil, err - } - eTypeDecoder, _ := decoder.(typeDecoder) - - idx := 0 - for { - vr, err := ar.ReadValue() - if err == bsonrw.ErrEOA { - break - } - if err != nil { - return nil, err - } - - elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) - if err != nil { - return nil, newDecodeError(strconv.Itoa(idx), err) - } - elems = append(elems, elem) - idx++ - } - - return elems, nil -} - -func (dvd DefaultValueDecoders) readCodeWithScope(dc DecodeContext, vr bsonrw.ValueReader) (primitive.CodeWithScope, error) { - var cws primitive.CodeWithScope - - code, dr, err := vr.ReadCodeWithScope() - if err != nil { - return cws, err - } - - scope := reflect.New(tD).Elem() - elems, err := dvd.decodeElemsFromDocumentReader(dc, dr) - if err != nil { - return cws, err - } - - scope.Set(reflect.MakeSlice(tD, 0, len(elems))) - scope.Set(reflect.Append(scope, elems...)) - - cws = primitive.CodeWithScope{ - Code: primitive.JavaScript(code), - Scope: scope.Interface().(primitive.D), - } - return cws, nil -} - -func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != 
tCodeWithScope { - return emptyValue, ValueDecoderError{ - Name: "CodeWithScopeDecodeValue", - Types: []reflect.Type{tCodeWithScope}, - Received: reflect.Zero(t), - } - } - - var cws primitive.CodeWithScope - var err error - switch vrType := vr.Type(); vrType { - case bsontype.CodeWithScope: - cws, err = dvd.readCodeWithScope(dc, vr) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(cws), nil -} - -// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCodeWithScope { - return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} - } - - elem, err := dvd.codeWithScopeDecodeType(dc, vr, tCodeWithScope) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) { - switch vr.Type() { - case bsontype.Type(0), bsontype.EmbeddedDocument: - default: - return nil, fmt.Errorf("cannot decode %v into a D", vr.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return nil, err - } - - return dvd.decodeElemsFromDocumentReader(dc, dr) -} - -func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) { - decoder, err := dc.LookupDecoder(tEmpty) - if err != nil { - return nil, err - } - - elems := make([]reflect.Value, 0) - for { - key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return nil, err - } - - val := reflect.New(tEmpty).Elem() - err = decoder.DecodeValue(dc, vr, val) - if err != nil { - return nil, newDecodeError(key, err) - } - - elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()})) - } - - return elems, nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go deleted file mode 100644 index 4ab14a668c..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ /dev/null @@ -1,856 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "net/url" - "reflect" - "sync" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var defaultValueEncoders DefaultValueEncoders - -var bvwPool = bsonrw.NewBSONValueWriterPool() - -var errInvalidValue = errors.New("cannot encode invalid element") - -var sliceWriterPool = sync.Pool{ - New: func() interface{} { - sw := make(bsonrw.SliceWriter, 0) - return &sw - }, -} - -func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error { - vw, err := dw.WriteDocumentElement(e.Key) - if err != nil { - return err - } - - if e.Value == nil { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value)) - if err != nil { - return err - } - - err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value)) - if err != nil { - return err - } - return nil -} - -// DefaultValueEncoders is a namespace type for the default ValueEncoders used -// when creating a registry. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -type DefaultValueEncoders struct{} - -// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with -// the provided RegistryBuilder. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) - } - rb. - RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec). - RegisterTypeEncoder(tTime, defaultTimeCodec). - RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec). - RegisterTypeEncoder(tCoreArray, defaultArrayCodec). - RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)). - RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)). - RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)). - RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)). - RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)). - RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)). - RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)). - RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)). - RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)). - RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)). - RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)). - RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)). - RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)). - RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)). - RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)). - RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)). - RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)). - RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)). - RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)). 
- RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)). - RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)). - RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)). - RegisterDefaultEncoder(reflect.Map, defaultMapCodec). - RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec). - RegisterDefaultEncoder(reflect.String, defaultStringCodec). - RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()). - RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()). - RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). - RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). - RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)) -} - -// BooleanEncodeValue is the ValueEncoderFunc for bool types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Bool { - return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} - } - return vw.WriteBoolean(val.Bool()) -} - -func fitsIn32Bits(i int64) bool { - return math.MinInt32 <= i && i <= math.MaxInt32 -} - -// IntEncodeValue is the ValueEncoderFunc for int types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32: - return vw.WriteInt32(int32(val.Int())) - case reflect.Int: - i64 := val.Int() - if fitsIn32Bits(i64) { - return vw.WriteInt32(int32(i64)) - } - return vw.WriteInt64(i64) - case reflect.Int64: - i64 := val.Int() - if ec.MinSize && fitsIn32Bits(i64) { - return vw.WriteInt32(int32(i64)) - } - return vw.WriteInt64(i64) - } - - return ValueEncoderError{ - Name: "IntEncodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: val, - } -} - -// UintEncodeValue is the ValueEncoderFunc for uint types. -// -// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead. 
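The width selection in IntEncodeValue above (int8/int16/int32 become BSON int32, a Go int is shrunk when it fits, int64 keeps its width unless MinSize is set) is observable through the public API. A sketch, assuming the default registry's behavior:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// A Go int that fits in 32 bits is written as a BSON int32...
	small, _ := bson.Marshal(bson.D{{Key: "n", Value: 5}})
	// ...while an int64 keeps its width unless MinSize is enabled.
	wide, _ := bson.Marshal(bson.D{{Key: "n", Value: int64(5)}})
	fmt.Println(bson.Raw(small).Lookup("n").Type) // 32-bit integer
	fmt.Println(bson.Raw(wide).Lookup("n").Type)  // 64-bit integer
}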
-func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Uint8, reflect.Uint16: - return vw.WriteInt32(int32(val.Uint())) - case reflect.Uint, reflect.Uint32, reflect.Uint64: - u64 := val.Uint() - if ec.MinSize && u64 <= math.MaxInt32 { - return vw.WriteInt32(int32(u64)) - } - if u64 > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", u64) - } - return vw.WriteInt64(int64(u64)) - } - - return ValueEncoderError{ - Name: "UintEncodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } -} - -// FloatEncodeValue is the ValueEncoderFunc for float types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Float32, reflect.Float64: - return vw.WriteDouble(val.Float()) - } - - return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} -} - -// StringEncodeValue is the ValueEncoderFunc for string types. -// -// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. -func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.String { - return ValueEncoderError{ - Name: "StringEncodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: val, - } - } - - return vw.WriteString(val.String()) -} - -// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tOID { - return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} - } - return vw.WriteObjectID(val.Interface().(primitive.ObjectID)) -} - -// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDecimal { - return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} - } - return vw.WriteDecimal128(val.Interface().(primitive.Decimal128)) -} - -// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
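Because BSON has no unsigned integer type, the uint encoding above rejects uint64 values that exceed math.MaxInt64. A sketch of observing this through the public API (assuming the default codecs apply the same overflow rule):

package main

import (
	"fmt"
	"math"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// uint64 values above math.MaxInt64 cannot be represented in BSON's
	// signed integer types, so encoding fails.
	_, err := bson.Marshal(bson.D{{Key: "n", Value: uint64(math.MaxUint64)}})
	fmt.Println(err) // ... overflows int64
}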
-func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tJSONNumber { - return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} - } - jsnum := val.Interface().(json.Number) - - // Attempt int first, then float64 - if i64, err := jsnum.Int64(); err == nil { - return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64)) - } - - f64, err := jsnum.Float64() - if err != nil { - return err - } - - return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64)) -} - -// URLEncodeValue is the ValueEncoderFunc for url.URL. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tURL { - return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val} - } - u := val.Interface().(url.URL) - return vw.WriteString(u.String()) -} - -// TimeEncodeValue is the ValueEncoderFunc for time.Time. -// -// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead. -func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTime { - return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} - } - tt := val.Interface().(time.Time) - dt := primitive.NewDateTimeFromTime(tt) - return vw.WriteDateTime(int64(dt)) -} - -// ByteSliceEncodeValue is the ValueEncoderFunc for []byte. -// -// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tByteSlice { - return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - if val.IsNil() { - return vw.WriteNull() - } - return vw.WriteBinary(val.Interface().([]byte)) -} - -// MapEncodeValue is the ValueEncoderFunc for map[string]* types. -// -// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead. -func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { - return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - if val.IsNil() { - // If we have a nil map but we can't WriteNull, that means we're probably trying to encode - // to a TopLevel document. We can't currently tell if this is what actually happened, but if - // there's a deeper underlying problem, the error will also be returned from WriteDocument, - // so just continue. The operations on a map reflection value are valid, so we can call - // MapKeys within mapEncodeValue without a problem. - err := vw.WriteNull() - if err == nil { - return nil - } - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - return dve.mapEncodeValue(ec, dw, val, nil) -} - -// mapEncodeValue handles encoding of the values of a map. The collisionFn returns -// true if the provided key exists; this is mainly used for inline maps in the -// struct codec.
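As the nil checks in the map and byte-slice encoders above show, nil values are written as BSON null rather than as empty documents or binaries. A sketch, assuming the default codecs:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// With the default codecs, nil maps and nil byte slices are
	// encoded as BSON null.
	data, err := bson.Marshal(bson.D{
		{Key: "m", Value: map[string]int(nil)},
		{Key: "b", Value: []byte(nil)},
	})
	if err != nil {
		panic(err)
	}
	raw := bson.Raw(data)
	fmt.Println(raw.Lookup("m").Type, raw.Lookup("b").Type) // null null
}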
-func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - keys := val.MapKeys() - for _, key := range keys { - if collisionFn != nil && collisionFn(key.String()) { - return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) - } - - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := dw.WriteDocumentElement(key.String()) - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -// ArrayEncodeValue is the ValueEncoderFunc for array types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Array { - return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. - if val.Type().Elem() == tE { - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - e := val.Index(idx).Interface().(primitive.E) - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - // If we have a []byte we want to treat it as a binary instead of as an array. - if val.Type().Elem() == tByte { - var byteSlice []byte - for idx := 0; idx < val.Len(); idx++ { - byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) - } - return vw.WriteBinary(byteSlice) - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -// SliceEncodeValue is the ValueEncoderFunc for slice types. -// -// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Slice { - return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. 
- if val.Type().ConvertibleTo(tD) { - d := val.Convert(tD).Interface().(primitive.D) - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for _, e := range d { - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) { - if origEncoder != nil || (currVal.Kind() != reflect.Interface) { - return origEncoder, currVal, nil - } - currVal = currVal.Elem() - if !currVal.IsValid() { - return nil, currVal, errInvalidValue - } - currEncoder, err := ec.LookupEncoder(currVal.Type()) - - return currEncoder, currVal, err -} - -// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. -// -// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tEmpty { - return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(val.Elem().Type()) - if err != nil { - return err - } - - return encoder.EncodeValue(ec, vw, val.Elem()) -} - -// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement ValueMarshaler - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} - case val.Type().Implements(tValueMarshaler): - // If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tValueMarshaler) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} - } - - m, ok := val.Interface().(ValueMarshaler) - if !ok { - return vw.WriteNull() - } - t, data, err := m.MarshalBSONValue() - if err != nil { - return err - } - return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) -} - -// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement Marshaler - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} - case val.Type().Implements(tMarshaler): - // If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tMarshaler) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} - } - - m, ok := val.Interface().(Marshaler) - if !ok { - return vw.WriteNull() - } - data, err := m.MarshalBSON() - if err != nil { - return err - } - return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) -} - -// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement Proxy - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} - case val.Type().Implements(tProxy): - // If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tProxy) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} - } - - m, ok := val.Interface().(Proxy) - if !ok { - return vw.WriteNull() - } - v, err := m.ProxyBSON() - if err != nil { - return err - } - if v == nil { - encoder, err := ec.LookupEncoder(nil) - if err != nil { - return err - } - return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil)) - } - vv := reflect.ValueOf(v) - switch vv.Kind() { - case reflect.Ptr, reflect.Interface: - vv = vv.Elem() - } - encoder, err := ec.LookupEncoder(vv.Type()) - if err != nil { - return err - } - return encoder.EncodeValue(ec, vw, vv) -} - -// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tJavaScript { - return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} - } - - return vw.WriteJavascript(val.String()) -} - -// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tSymbol { - return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} - } - - return vw.WriteSymbol(val.String()) -} - -// BinaryEncodeValue is the ValueEncoderFunc for Binary. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tBinary { - return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} - } - b := val.Interface().(primitive.Binary) - - return vw.WriteBinaryWithSubtype(b.Data, b.Subtype) -} - -// UndefinedEncodeValue is the ValueEncoderFunc for Undefined. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tUndefined { - return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} - } - - return vw.WriteUndefined() -} - -// DateTimeEncodeValue is the ValueEncoderFunc for DateTime. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDateTime { - return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} - } - - return vw.WriteDateTime(val.Int()) -} - -// NullEncodeValue is the ValueEncoderFunc for Null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tNull { - return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} - } - - return vw.WriteNull() -} - -// RegexEncodeValue is the ValueEncoderFunc for Regex. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tRegex { - return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} - } - - regex := val.Interface().(primitive.Regex) - - return vw.WriteRegex(regex.Pattern, regex.Options) -} - -// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDBPointer { - return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} - } - - dbp := val.Interface().(primitive.DBPointer) - - return vw.WriteDBPointer(dbp.DB, dbp.Pointer) -} - -// TimestampEncodeValue is the ValueEncoderFunc for Timestamp. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTimestamp { - return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} - } - - ts := val.Interface().(primitive.Timestamp) - - return vw.WriteTimestamp(ts.T, ts.I) -} - -// MinKeyEncodeValue is the ValueEncoderFunc for MinKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tMinKey { - return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} - } - - return vw.WriteMinKey() -} - -// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tMaxKey { - return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} - } - - return vw.WriteMaxKey() -} - -// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCoreDocument { - return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} - } - - cdoc := val.Interface().(bsoncore.Document) - - return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc) -} - -// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCodeWithScope { - return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} - } - - cws := val.Interface().(primitive.CodeWithScope) - - dw, err := vw.WriteCodeWithScope(string(cws.Code)) - if err != nil { - return err - } - - sw := sliceWriterPool.Get().(*bsonrw.SliceWriter) - defer sliceWriterPool.Put(sw) - *sw = (*sw)[:0] - - scopeVW := bvwPool.Get(sw) - defer bvwPool.Put(scopeVW) - - encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope)) - if err != nil { - return err - } - - err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope)) - if err != nil { - return err - } - - err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw) - if err != nil { - return err - } - return dw.WriteDocumentEnd() -} - -// isImplementationNil returns if val is a nil pointer and inter is implemented on a concrete type -func isImplementationNil(val reflect.Value, inter reflect.Type) bool { - vt := val.Type() - for vt.Kind() == reflect.Ptr { - vt = vt.Elem() - } - return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil() -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go deleted file mode 100644 index 4613e5a1ec..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2022-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsoncodec provides a system for encoding values to BSON representations and decoding -// values from BSON representations. This package considers both binary BSON and ExtendedJSON as -// BSON representations. The types in this package enable a flexible system for handling this -// encoding and decoding. -// -// The codec system is composed of two parts: -// -// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON -// representations. -// -// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for -// retrieving them. -// -// # ValueEncoders and ValueDecoders -// -// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. -// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the -// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc -// is provided to allow use of a function with the correct signature as a ValueEncoder. An -// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and -// to provide configuration information. -// -// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that -// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to -// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext -// instance is provided and serves similar functionality to the EncodeContext. -// -// # Registry -// -// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. 
See the Registry type
-// documentation for examples of registering various custom encoders and decoders. A Registry can
-// have four main types of registrations:
-//
-// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and
-// RegisterTypeDecoder methods. The registered codec will be invoked when encoding/decoding a value
-// whose type matches the registered type exactly.
-// If the registered type is an interface, the codec will be invoked when encoding or decoding
-// values whose type is the interface, but not for values with concrete types that implement the
-// interface.
-//
-// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and
-// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs
-// will be invoked when encoding or decoding values whose types implement the interface. An example
-// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method
-// for any value whose type implements bson.Marshaler, regardless of the value's concrete type.
-//
-// 3. Type map entries - These can be used to associate a BSON type with a Go type. These type
-// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}.
-// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances,
-// respectively, when decoding into a bson.D. The following code would change the behavior so these
-// values decode as Go int instances instead:
-//
-//	intType := reflect.TypeOf(int(0))
-//	registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType)
-//
-// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and
-// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding
-// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't
-// match a registered type or hook encoder/decoder first. These methods should be used to change the
-// behavior for all values for a specific kind.
-//
-// # Registry Lookup Procedure
-//
-// When looking up an encoder in a Registry, the precedence rules are as follows:
-//
-// 1. A type encoder registered for the exact type of the value.
-//
-// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to
-// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and
-// bsoncodec.ValueMarshaler), the first one registered will be selected. Note that registries
-// constructed using bson.NewRegistry have driver-defined hooks registered for the
-// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take
-// precedence over any new hooks.
-//
-// 3. A kind encoder registered for the value's kind.
-//
-// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The
-// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder
-// will be returned if no decoder is found.
-//
-// # DefaultValueEncoders and DefaultValueDecoders
-//
-// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and
-// ValueDecoders for handling a wide range of Go types, including all of the types within the
-// primitive package. To make registering these codecs easier, a helper method on each type is
-// provided.
For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for
-// the DefaultValueDecoders type the method is called RegisterDefaultDecoders; these methods also
-// handle registering type map entries for each BSON type.
-package bsoncodec
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
deleted file mode 100644
index 94f7dcf1eb..0000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"reflect"
-
-	"go.mongodb.org/mongo-driver/bson/bsonoptions"
-	"go.mongodb.org/mongo-driver/bson/bsonrw"
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-	"go.mongodb.org/mongo-driver/bson/primitive"
-)
-
-// EmptyInterfaceCodec is the Codec used for interface{} values.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
-// EmptyInterfaceCodec registered.
-type EmptyInterfaceCodec struct {
-	// DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the
-	// "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
-	//
-	// Deprecated: Use bson.Decoder.BinaryAsSlice instead.
-	DecodeBinaryAsSlice bool
-}
-
-var (
-	defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec()
-
-	// Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it
-	// to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a
-	// collection.
-	_ typeDecoder = defaultEmptyInterfaceCodec
-)
-
-// NewEmptyInterfaceCodec returns an EmptyInterfaceCodec with options opts.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
-// EmptyInterfaceCodec registered.
-func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec {
-	interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...)
-
-	codec := EmptyInterfaceCodec{}
-	if interfaceOpt.DecodeBinaryAsSlice != nil {
-		codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice
-	}
-	return &codec
-}
-
-// EncodeValue is the ValueEncoderFunc for interface{}.
-func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Type() != tEmpty {
-		return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}
-	}
-
-	if val.IsNil() {
-		return vw.WriteNull()
-	}
-	encoder, err := ec.LookupEncoder(val.Elem().Type())
-	if err != nil {
-		return err
-	}
-
-	return encoder.EncodeValue(ec, vw, val.Elem())
-}
-
-func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) {
-	isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument
-	if isDocument {
-		if dc.defaultDocumentType != nil {
-			// If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return
-			// that type.
- return dc.defaultDocumentType, nil - } - if dc.Ancestor != nil { - // Using ancestor information rather than looking up the type map entry forces consistent decoding. - // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry - // has been registered. - return dc.Ancestor, nil - } - } - - rtype, err := dc.LookupTypeMapEntry(valueType) - if err == nil { - return rtype, nil - } - - if isDocument { - // For documents, fallback to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument, - // depending on the original valueType. - var lookupType bsontype.Type - switch valueType { - case bsontype.Type(0): - lookupType = bsontype.EmbeddedDocument - case bsontype.EmbeddedDocument: - lookupType = bsontype.Type(0) - } - - rtype, err = dc.LookupTypeMapEntry(lookupType) - if err == nil { - return rtype, nil - } - } - - return nil, err -} - -func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tEmpty { - return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)} - } - - rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type()) - if err != nil { - switch vr.Type() { - case bsontype.Null: - return reflect.Zero(t), vr.ReadNull() - default: - return emptyValue, err - } - } - - decoder, err := dc.LookupDecoder(rtype) - if err != nil { - return emptyValue, err - } - - elem, err := decodeTypeOrValue(decoder, dc, vr, rtype) - if err != nil { - return emptyValue, err - } - - if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary { - binElem := elem.Interface().(primitive.Binary) - if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { - elem = reflect.ValueOf(binElem.Data) - } - } - - return elem, nil -} - -// DecodeValue is the ValueDecoderFunc for interface{}. -func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tEmpty { - return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - elem, err := eic.decodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.Set(elem) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go deleted file mode 100644 index 325c1738ab..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding" - "fmt" - "reflect" - "strconv" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -var defaultMapCodec = NewMapCodec() - -// MapCodec is the Codec used for map values. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// MapCodec registered. -type MapCodec struct { - // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination - // value passed to Decode before unmarshaling BSON documents into them. 
-	//
-	// Deprecated: Use bson.Decoder.ZeroMaps instead.
-	DecodeZerosMap bool
-
-	// EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of
-	// BSON null.
-	//
-	// Deprecated: Use bson.Encoder.NilMapAsEmpty instead.
-	EncodeNilAsEmpty bool
-
-	// EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name
-	// strings using fmt.Sprintf() instead of the default string conversion logic.
-	//
-	// Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt instead.
-	EncodeKeysWithStringer bool
-}
-
-// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key.
-// This applies to types used as map keys and is similar to encoding.TextMarshaler.
-type KeyMarshaler interface {
-	MarshalKey() (key string, err error)
-}
-
-// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation
-// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler.
-//
-// UnmarshalKey must be able to decode the form generated by MarshalKey.
-// UnmarshalKey must copy the text if it wishes to retain the text
-// after returning.
type KeyUnmarshaler interface {
-	UnmarshalKey(key string) error
-}
-
-// NewMapCodec returns a MapCodec with options opts.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
-// MapCodec registered.
-func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec {
-	mapOpt := bsonoptions.MergeMapCodecOptions(opts...)
-
-	codec := MapCodec{}
-	if mapOpt.DecodeZerosMap != nil {
-		codec.DecodeZerosMap = *mapOpt.DecodeZerosMap
-	}
-	if mapOpt.EncodeNilAsEmpty != nil {
-		codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty
-	}
-	if mapOpt.EncodeKeysWithStringer != nil {
-		codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer
-	}
-	return &codec
-}
-
-// EncodeValue is the ValueEncoder for map[*]* types.
-func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Kind() != reflect.Map {
-		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
-	}
-
-	if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty {
-		// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
-		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
-		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
-		// so just continue. The operations on a map reflection value are valid, so we can call
-		// MapKeys within mapEncodeValue without a problem.
-		err := vw.WriteNull()
-		if err == nil {
-			return nil
-		}
-	}
-
-	dw, err := vw.WriteDocument()
-	if err != nil {
-		return err
-	}
-
-	return mc.mapEncodeValue(ec, dw, val, nil)
-}
-
-// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
-// true if the provided key exists; this is mainly used for inline maps in the
-// struct codec.
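The KeyMarshaler and KeyUnmarshaler interfaces defined above are the hooks that let non-string types act as map keys. An editor-added round-trip sketch through the public bson package; UserID is a hypothetical type invented for illustration:

	package main

	import (
		"fmt"
		"strconv"
		"strings"

		"go.mongodb.org/mongo-driver/bson"
	)

	// UserID is a non-string map key type.
	type UserID int

	// MarshalKey converts the key to a BSON field name. A value receiver
	// matters here: the map codec type-asserts the key value itself.
	func (id UserID) MarshalKey() (string, error) {
		return "user-" + strconv.Itoa(int(id)), nil
	}

	// UnmarshalKey parses the field name produced by MarshalKey.
	func (id *UserID) UnmarshalKey(key string) error {
		n, err := strconv.Atoi(strings.TrimPrefix(key, "user-"))
		if err != nil {
			return err
		}
		*id = UserID(n)
		return nil
	}

	func main() {
		in := map[UserID]string{7: "alice"}
		raw, err := bson.Marshal(in) // encodes as {"user-7": "alice"}
		if err != nil {
			panic(err)
		}
		out := map[UserID]string{}
		if err := bson.Unmarshal(raw, &out); err != nil {
			panic(err)
		}
		fmt.Println(out[7]) // alice
	}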
-func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error {
-
-	elemType := val.Type().Elem()
-	encoder, err := ec.LookupEncoder(elemType)
-	if err != nil && elemType.Kind() != reflect.Interface {
-		return err
-	}
-
-	keys := val.MapKeys()
-	for _, key := range keys {
-		keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt)
-		if err != nil {
-			return err
-		}
-
-		if collisionFn != nil && collisionFn(keyStr) {
-			return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
-		}
-
-		currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key))
-		if lookupErr != nil && lookupErr != errInvalidValue {
-			return lookupErr
-		}
-
-		vw, err := dw.WriteDocumentElement(keyStr)
-		if err != nil {
-			return err
-		}
-
-		if lookupErr == errInvalidValue {
-			err = vw.WriteNull()
-			if err != nil {
-				return err
-			}
-			continue
-		}
-
-		err = currEncoder.EncodeValue(ec, vw, currVal)
-		if err != nil {
-			return err
-		}
-	}
-
-	return dw.WriteDocumentEnd()
-}
-
-// DecodeValue is the ValueDecoder for map[string/decimal]* types.
-func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
-	if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) {
-		return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
-	}
-
-	switch vrType := vr.Type(); vrType {
-	case bsontype.Type(0), bsontype.EmbeddedDocument:
-	case bsontype.Null:
-		val.Set(reflect.Zero(val.Type()))
-		return vr.ReadNull()
-	case bsontype.Undefined:
-		val.Set(reflect.Zero(val.Type()))
-		return vr.ReadUndefined()
-	default:
-		return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type())
-	}
-
-	dr, err := vr.ReadDocument()
-	if err != nil {
-		return err
-	}
-
-	if val.IsNil() {
-		val.Set(reflect.MakeMap(val.Type()))
-	}
-
-	if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) {
-		clearMap(val)
-	}
-
-	eType := val.Type().Elem()
-	decoder, err := dc.LookupDecoder(eType)
-	if err != nil {
-		return err
-	}
-	eTypeDecoder, _ := decoder.(typeDecoder)
-
-	if eType == tEmpty {
-		dc.Ancestor = val.Type()
-	}
-
-	keyType := val.Type().Key()
-
-	for {
-		key, vr, err := dr.ReadElement()
-		if err == bsonrw.ErrEOD {
-			break
-		}
-		if err != nil {
-			return err
-		}
-
-		k, err := mc.decodeKey(key, keyType)
-		if err != nil {
-			return err
-		}
-
-		elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true)
-		if err != nil {
-			return newDecodeError(key, err)
-		}
-
-		val.SetMapIndex(k, elem)
-	}
-	return nil
-}
-
-func clearMap(m reflect.Value) {
-	var none reflect.Value
-	for _, k := range m.MapKeys() {
-		m.SetMapIndex(k, none)
-	}
-}
-
-func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) {
-	if mc.EncodeKeysWithStringer || encodeKeysWithStringer {
-		return fmt.Sprint(val), nil
-	}
-
-	// keys of any string type are used directly
-	if val.Kind() == reflect.String {
-		return val.String(), nil
-	}
-	// KeyMarshalers are marshaled
-	if km, ok := val.Interface().(KeyMarshaler); ok {
-		if val.Kind() == reflect.Ptr && val.IsNil() {
-			return "", nil
-		}
-		buf, err := km.MarshalKey()
-		if err == nil {
-			return buf, nil
-		}
-		return "", err
-	}
-	// keys that implement encoding.TextMarshaler are marshaled.
-	if km, ok := val.Interface().(encoding.TextMarshaler); ok {
-		if val.Kind() == reflect.Ptr && val.IsNil() {
-			return "", nil
-		}
-
-		buf, err := km.MarshalText()
-		if err != nil {
-			return "", err
-		}
-
-		return string(buf), nil
-	}
-
-	switch val.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return strconv.FormatInt(val.Int(), 10), nil
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return strconv.FormatUint(val.Uint(), 10), nil
-	}
-	return "", fmt.Errorf("unsupported key type: %v", val.Type())
-}
-
-var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem()
-var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
-
-func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) {
-	keyVal := reflect.ValueOf(key)
-	var err error
-	switch {
-	// First, if EncodeKeysWithStringer is not enabled, try to decode with KeyUnmarshaler
-	case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType):
-		keyVal = reflect.New(keyType)
-		v := keyVal.Interface().(KeyUnmarshaler)
-		err = v.UnmarshalKey(key)
-		keyVal = keyVal.Elem()
-	// Try to decode encoding.TextUnmarshalers.
-	case reflect.PtrTo(keyType).Implements(textUnmarshalerType):
-		keyVal = reflect.New(keyType)
-		v := keyVal.Interface().(encoding.TextUnmarshaler)
-		err = v.UnmarshalText([]byte(key))
-		keyVal = keyVal.Elem()
-	// Otherwise, go to type specific behavior
-	default:
-		switch keyType.Kind() {
-		case reflect.String:
-			keyVal = reflect.ValueOf(key).Convert(keyType)
-		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-			n, parseErr := strconv.ParseInt(key, 10, 64)
-			if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) {
-				err = fmt.Errorf("failed to unmarshal number key %v", key)
-			}
-			keyVal = reflect.ValueOf(n).Convert(keyType)
-		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-			n, parseErr := strconv.ParseUint(key, 10, 64)
-			if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) {
-				err = fmt.Errorf("failed to unmarshal number key %v", key)
-				break
-			}
-			keyVal = reflect.ValueOf(n).Convert(keyType)
-		case reflect.Float32, reflect.Float64:
-			if mc.EncodeKeysWithStringer {
-				parsed, err := strconv.ParseFloat(key, 64)
-				if err != nil {
-					return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err)
-				}
-				keyVal = reflect.ValueOf(parsed)
-				break
-			}
-			fallthrough
-		default:
-			return keyVal, fmt.Errorf("unsupported key type: %v", keyType)
-		}
-	}
-	return keyVal, err
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go
deleted file mode 100644
index fbd9f0a9e9..0000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License.
You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import "fmt"
-
-type mode int
-
-const (
-	_ mode = iota
-	mTopLevel
-	mDocument
-	mArray
-	mValue
-	mElement
-	mCodeWithScope
-	mSpacer
-)
-
-func (m mode) String() string {
-	var str string
-
-	switch m {
-	case mTopLevel:
-		str = "TopLevel"
-	case mDocument:
-		str = "DocumentMode"
-	case mArray:
-		str = "ArrayMode"
-	case mValue:
-		str = "ValueMode"
-	case mElement:
-		str = "ElementMode"
-	case mCodeWithScope:
-		str = "CodeWithScopeMode"
-	case mSpacer:
-		str = "CodeWithScopeSpacerFrame"
-	default:
-		str = "UnknownMode"
-	}
-
-	return str
-}
-
-// TransitionError is an error returned when an invalid transition occurs while
-// progressing a ValueReader or ValueWriter state machine.
-type TransitionError struct {
-	parent      mode
-	current     mode
-	destination mode
-}
-
-func (te TransitionError) Error() string {
-	if te.destination == mode(0) {
-		return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current)
-	}
-	if te.parent == mode(0) {
-		return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination)
-	}
-	return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent)
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
deleted file mode 100644
index e5923230b0..0000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"reflect"
-
-	"go.mongodb.org/mongo-driver/bson/bsonrw"
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-)
-
-var _ ValueEncoder = &PointerCodec{}
-var _ ValueDecoder = &PointerCodec{}
-
-// PointerCodec is the Codec used for pointers.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
-// PointerCodec registered.
-type PointerCodec struct {
-	ecache typeEncoderCache
-	dcache typeDecoderCache
-}
-
-// NewPointerCodec returns a PointerCodec that has been initialized.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
-// PointerCodec registered.
-func NewPointerCodec() *PointerCodec {
-	return &PointerCodec{}
-}
-
-// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil
-// or looking up an encoder for the type of value the pointer points to.
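As a quick illustration of the contract described above (nil pointers become BSON null, non-nil pointers defer to the pointee's encoder), here is an editor-added sketch that round-trips a struct with one set and one nil pointer field; the doc type is invented for the example:

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
	)

	type doc struct {
		A *int `bson:"a"`
		B *int `bson:"b"`
	}

	func main() {
		n := 5
		raw, err := bson.Marshal(doc{A: &n}) // B stays nil and encodes as null
		if err != nil {
			panic(err)
		}

		var out bson.M
		if err := bson.Unmarshal(raw, &out); err != nil {
			panic(err)
		}
		fmt.Println(out["a"], out["b"]) // 5 <nil>: the nil pointer round-trips as BSON null
	}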
-func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if val.Kind() != reflect.Ptr {
-		if !val.IsValid() {
-			return vw.WriteNull()
-		}
-		return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
-	}
-
-	if val.IsNil() {
-		return vw.WriteNull()
-	}
-
-	typ := val.Type()
-	if v, ok := pc.ecache.Load(typ); ok {
-		if v == nil {
-			return ErrNoEncoder{Type: typ}
-		}
-		return v.EncodeValue(ec, vw, val.Elem())
-	}
-	// TODO(charlie): handle concurrent requests for the same type
-	enc, err := ec.LookupEncoder(typ.Elem())
-	enc = pc.ecache.LoadOrStore(typ, enc)
-	if err != nil {
-		return err
-	}
-	return enc.EncodeValue(ec, vw, val.Elem())
-}
-
-// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and
-// using that to decode. If the BSON value is Null, this method will set the pointer to nil.
-func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
-	if !val.CanSet() || val.Kind() != reflect.Ptr {
-		return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
-	}
-
-	typ := val.Type()
-	if vr.Type() == bsontype.Null {
-		val.Set(reflect.Zero(typ))
-		return vr.ReadNull()
-	}
-	if vr.Type() == bsontype.Undefined {
-		val.Set(reflect.Zero(typ))
-		return vr.ReadUndefined()
-	}
-
-	if val.IsNil() {
-		val.Set(reflect.New(typ.Elem()))
-	}
-
-	if v, ok := pc.dcache.Load(typ); ok {
-		if v == nil {
-			return ErrNoDecoder{Type: typ}
-		}
-		return v.DecodeValue(dc, vr, val.Elem())
-	}
-	// TODO(charlie): handle concurrent requests for the same type
-	dec, err := dc.LookupDecoder(typ.Elem())
-	dec = pc.dcache.LoadOrStore(typ, dec)
-	if err != nil {
-		return err
-	}
-	return dec.DecodeValue(dc, vr, val.Elem())
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
deleted file mode 100644
index 4cf2b01ab4..0000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types
-// that implement this interface will have ProxyBSON called during the encoding process and that
-// value will be encoded in place for the implementer.
-type Proxy interface {
-	ProxyBSON() (interface{}, error)
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
deleted file mode 100644
index 196c491bbb..0000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
+++ /dev/null
@@ -1,524 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"sync"
-
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-)
-
-// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.
-//
-// Deprecated: ErrNilType will not be supported in Go Driver 2.0.
-var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
-
-// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
-//
-// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0.
-var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
-
-// ErrNoEncoder is returned when there wasn't an encoder available for a type.
-//
-// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0.
-type ErrNoEncoder struct {
-	Type reflect.Type
-}
-
-func (ene ErrNoEncoder) Error() string {
-	if ene.Type == nil {
-		return "no encoder found for <nil>"
-	}
-	return "no encoder found for " + ene.Type.String()
-}
-
-// ErrNoDecoder is returned when there wasn't a decoder available for a type.
-//
-// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0.
-type ErrNoDecoder struct {
-	Type reflect.Type
-}
-
-func (end ErrNoDecoder) Error() string {
-	return "no decoder found for " + end.Type.String()
-}
-
-// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
-//
-// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0.
-type ErrNoTypeMapEntry struct {
-	Type bsontype.Type
-}
-
-func (entme ErrNoTypeMapEntry) Error() string {
-	return "no type map entry found for " + entme.Type.String()
-}
-
-// ErrNotInterface is returned when the provided type is not an interface.
-//
-// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0.
-var ErrNotInterface = errors.New("The provided type is not an interface")
-
-// A RegistryBuilder is used to build a Registry. This type is not goroutine
-// safe.
-//
-// Deprecated: Use Registry instead.
-type RegistryBuilder struct {
-	registry *Registry
-}
-
-// NewRegistryBuilder creates a new empty RegistryBuilder.
-//
-// Deprecated: Use NewRegistry instead.
-func NewRegistryBuilder() *RegistryBuilder {
-	return &RegistryBuilder{
-		registry: NewRegistry(),
-	}
-}
-
-// RegisterCodec will register the provided ValueCodec for the provided type.
-//
-// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead.
-func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder {
-	rb.RegisterTypeEncoder(t, codec)
-	rb.RegisterTypeDecoder(t, codec)
-	return rb
-}
-
-// RegisterTypeEncoder will register the provided ValueEncoder for the provided type.
-//
-// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered
-// for a pointer to that type.
-//
-// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It
-// will not be called when marshaling a non-interface type that implements the interface.
-//
-// Deprecated: Use Registry.RegisterTypeEncoder instead.
-func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
-	rb.registry.RegisterTypeEncoder(t, enc)
-	return rb
-}
-
-// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when
-// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not
-// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
-//
-// Deprecated: Use Registry.RegisterInterfaceEncoder instead.
-func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
-	rb.registry.RegisterInterfaceEncoder(t, enc)
-	return rb
-}
-
-// RegisterTypeDecoder will register the provided ValueDecoder for the provided type.
-//
-// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered
-// for a pointer to that type.
-//
-// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface.
-// It will not be called when unmarshaling into a non-interface type that implements the interface.
-//
-// Deprecated: Use Registry.RegisterTypeDecoder instead.
-func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
-	rb.registry.RegisterTypeDecoder(t, dec)
-	return rb
-}
-
-// RegisterHookDecoder will register a decoder for the provided interface type t. This decoder will be called when
-// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not
-// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
-//
-// Deprecated: Use Registry.RegisterInterfaceDecoder instead.
-func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
-	rb.registry.RegisterInterfaceDecoder(t, dec)
-	return rb
-}
-
-// RegisterEncoder registers the provided type and encoder pair.
-//
-// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead.
-func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
-	if t == tEmpty {
-		rb.registry.RegisterTypeEncoder(t, enc)
-		return rb
-	}
-	switch t.Kind() {
-	case reflect.Interface:
-		rb.registry.RegisterInterfaceEncoder(t, enc)
-	default:
-		rb.registry.RegisterTypeEncoder(t, enc)
-	}
-	return rb
-}
-
-// RegisterDecoder registers the provided type and decoder pair.
-//
-// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead.
-func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
-	if t == nil {
-		rb.registry.RegisterTypeDecoder(t, dec)
-		return rb
-	}
-	if t == tEmpty {
-		rb.registry.RegisterTypeDecoder(t, dec)
-		return rb
-	}
-	switch t.Kind() {
-	case reflect.Interface:
-		rb.registry.RegisterInterfaceDecoder(t, dec)
-	default:
-		rb.registry.RegisterTypeDecoder(t, dec)
-	}
-	return rb
-}
-
-// RegisterDefaultEncoder will register the provided ValueEncoder to the provided
-// kind.
-//
-// Deprecated: Use Registry.RegisterKindEncoder instead.
-func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder {
-	rb.registry.RegisterKindEncoder(kind, enc)
-	return rb
-}
-
-// RegisterDefaultDecoder will register the provided ValueDecoder to the
-// provided kind.
-//
-// Deprecated: Use Registry.RegisterKindDecoder instead.
-func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder {
-	rb.registry.RegisterKindDecoder(kind, dec)
-	return rb
-}
-
-// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
-// mapping is decoding situations where an empty interface is used and a default type needs to be
-// created and decoded into.
-//
-// By default, BSON documents will decode into interface{} values as bson.D.
To change the default type for BSON
-// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents
-// to decode to bson.Raw, use the following code:
-//
-//	rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
-//
-// Deprecated: Use Registry.RegisterTypeMapEntry instead.
-func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder {
-	rb.registry.RegisterTypeMapEntry(bt, rt)
-	return rb
-}
-
-// Build creates a Registry from the current state of this RegistryBuilder.
-//
-// Deprecated: Use NewRegistry instead.
-func (rb *RegistryBuilder) Build() *Registry {
-	r := &Registry{
-		interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...),
-		interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...),
-		typeEncoders:      rb.registry.typeEncoders.Clone(),
-		typeDecoders:      rb.registry.typeDecoders.Clone(),
-		kindEncoders:      rb.registry.kindEncoders.Clone(),
-		kindDecoders:      rb.registry.kindDecoders.Clone(),
-	}
-	rb.registry.typeMap.Range(func(k, v interface{}) bool {
-		if k != nil && v != nil {
-			r.typeMap.Store(k, v)
-		}
-		return true
-	})
-	return r
-}
-
-// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
-// type passed around and Encoders and Decoders are constructed from it.
-type Registry struct {
-	interfaceEncoders []interfaceValueEncoder
-	interfaceDecoders []interfaceValueDecoder
-	typeEncoders      *typeEncoderCache
-	typeDecoders      *typeDecoderCache
-	kindEncoders      *kindEncoderCache
-	kindDecoders      *kindDecoderCache
-	typeMap           sync.Map // map[bsontype.Type]reflect.Type
-}
-
-// NewRegistry creates a new empty Registry.
-func NewRegistry() *Registry {
-	return &Registry{
-		typeEncoders: new(typeEncoderCache),
-		typeDecoders: new(typeDecoderCache),
-		kindEncoders: new(kindEncoderCache),
-		kindDecoders: new(kindDecoderCache),
-	}
-}
-
-// RegisterTypeEncoder registers the provided ValueEncoder for the provided type.
-//
-// The type will be used as provided, so an encoder can be registered for a type and a different
-// encoder can be registered for a pointer to that type.
-//
-// If the given type is an interface, the encoder will be called when marshaling a type that is
-// that interface. It will not be called when marshaling a non-interface type that implements the
-// interface. To get the latter behavior, call RegisterHookEncoder instead.
-//
-// RegisterTypeEncoder should not be called concurrently with any other Registry method.
-func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) {
-	r.typeEncoders.Store(valueType, enc)
-}
-
-// RegisterTypeDecoder registers the provided ValueDecoder for the provided type.
-//
-// The type will be used as provided, so a decoder can be registered for a type and a different
-// decoder can be registered for a pointer to that type.
-//
-// If the given type is an interface, the decoder will be called when unmarshaling into a type that
-// is that interface. It will not be called when unmarshaling into a non-interface type that
-// implements the interface. To get the latter behavior, call RegisterHookDecoder instead.
-//
-// RegisterTypeDecoder should not be called concurrently with any other Registry method.
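The type map that Registry carries is easiest to see end to end from the public bson package. An editor-added sketch, assuming the Go driver 1.x helpers bson.NewRegistry and bson.UnmarshalWithRegistry (deprecated but present in 1.x); it remaps BSON int32/int64 to plain Go int when decoding into interface{} values, exactly as the doc comment above suggests:

	package main

	import (
		"fmt"
		"reflect"

		"go.mongodb.org/mongo-driver/bson"
		"go.mongodb.org/mongo-driver/bson/bsontype"
	)

	func main() {
		// Start from the defaulted registry and override the type map so
		// BSON int32/int64 decode as Go int inside interface{} values.
		reg := bson.NewRegistry()
		intType := reflect.TypeOf(int(0))
		reg.RegisterTypeMapEntry(bsontype.Int32, intType)
		reg.RegisterTypeMapEntry(bsontype.Int64, intType)

		raw, err := bson.Marshal(bson.M{"n": int32(7)})
		if err != nil {
			panic(err)
		}

		var out bson.M
		if err := bson.UnmarshalWithRegistry(reg, raw, &out); err != nil {
			panic(err)
		}
		fmt.Printf("%T\n", out["n"]) // int, instead of the default int32
	}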
-func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) {
-	r.typeDecoders.Store(valueType, dec)
-}
-
-// RegisterKindEncoder registers the provided ValueEncoder for the provided kind.
-//
-// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For
-// example, consider the type MyInt defined as
-//
-//	type MyInt int32
-//
-// To define an encoder for MyInt and int32, use RegisterKindEncoder like
-//
-//	reg.RegisterKindEncoder(reflect.Int32, myEncoder)
-//
-// RegisterKindEncoder should not be called concurrently with any other Registry method.
-func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) {
-	r.kindEncoders.Store(kind, enc)
-}
-
-// RegisterKindDecoder registers the provided ValueDecoder for the provided kind.
-//
-// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For
-// example, consider the type MyInt defined as
-//
-//	type MyInt int32
-//
-// To define a decoder for MyInt and int32, use RegisterKindDecoder like
-//
-//	reg.RegisterKindDecoder(reflect.Int32, myDecoder)
-//
-// RegisterKindDecoder should not be called concurrently with any other Registry method.
-func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) {
-	r.kindDecoders.Store(kind, dec)
-}
-
-// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will
-// be called when marshaling a type if the type implements iface or a pointer to the type
-// implements iface. If the provided type is not an interface
-// (i.e. iface.Kind() != reflect.Interface), this method will panic.
-//
-// RegisterInterfaceEncoder should not be called concurrently with any other Registry method.
-func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) {
-	if iface.Kind() != reflect.Interface {
-		panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+
-			"got type %s with kind %s", iface, iface.Kind())
-		panic(panicStr)
-	}
-
-	for idx, encoder := range r.interfaceEncoders {
-		if encoder.i == iface {
-			r.interfaceEncoders[idx].ve = enc
-			return
-		}
-	}
-
-	r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc})
-}
-
-// RegisterInterfaceDecoder registers a decoder for the provided interface type iface. This decoder will
-// be called when unmarshaling into a type if the type implements iface or a pointer to the type
-// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface),
-// this method will panic.
-//
-// RegisterInterfaceDecoder should not be called concurrently with any other Registry method.
-func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) {
-	if iface.Kind() != reflect.Interface {
-		panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+
-			"got type %s with kind %s", iface, iface.Kind())
-		panic(panicStr)
-	}
-
-	for idx, decoder := range r.interfaceDecoders {
-		if decoder.i == iface {
-			r.interfaceDecoders[idx].vd = dec
-			return
-		}
-	}
-
-	r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec})
-}
-
-// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
-// mapping is decoding situations where an empty interface is used and a default type needs to be
-// created and decoded into.
-// -// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON -// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents -// to decode to bson.Raw, use the following code: -// -// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) -func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { - r.typeMap.Store(bt, rt) -} - -// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup -// order: -// -// 1. An encoder registered for the exact type. If the given type is an interface, an encoder -// registered using RegisterTypeEncoder for that interface will be selected. -// -// 2. An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type -// or by a pointer to the type. -// -// 3. An encoder registered using RegisterKindEncoder for the kind of value. -// -// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for -// concurrent use by multiple goroutines after all codecs and encoders are registered. -func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { - if valueType == nil { - return nil, ErrNoEncoder{Type: valueType} - } - enc, found := r.lookupTypeEncoder(valueType) - if found { - if enc == nil { - return nil, ErrNoEncoder{Type: valueType} - } - return enc, nil - } - - enc, found = r.lookupInterfaceEncoder(valueType, true) - if found { - return r.typeEncoders.LoadOrStore(valueType, enc), nil - } - - if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { - return r.storeTypeEncoder(valueType, v), nil - } - return nil, ErrNoEncoder{Type: valueType} -} - -func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder { - return r.typeEncoders.LoadOrStore(rt, enc) -} - -func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) { - return r.typeEncoders.Load(rt) -} - -func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { - if valueType == nil { - return nil, false - } - for _, ienc := range r.interfaceEncoders { - if valueType.Implements(ienc.i) { - return ienc.ve, true - } - if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) { - // if *t implements an interface, this will catch if t implements an interface further - // ahead in interfaceEncoders - defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) - if !found { - defaultEnc, _ = r.kindEncoders.Load(valueType.Kind()) - } - return newCondAddrEncoder(ienc.ve, defaultEnc), true - } - } - return nil, false -} - -// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup -// order: -// -// 1. A decoder registered for the exact type. If the given type is an interface, a decoder -// registered using RegisterTypeDecoder for that interface will be selected. -// -// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by -// a pointer to the type. -// -// 3. A decoder registered using RegisterKindDecoder for the kind of value. -// -// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for -// concurrent use by multiple goroutines after all codecs and decoders are registered. 
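A short sketch of how the three-step lookup order behaves in practice: with only a kind encoder registered, a named type misses steps 1 and 2 and resolves through its kind, and the result is then cached as a type encoder for later lookups. int32KindEncoder and MyInt are illustrative names:

    package main

    import (
        "fmt"
        "reflect"

        "go.mongodb.org/mongo-driver/bson/bsoncodec"
        "go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    // int32KindEncoder handles any value whose kind is int32.
    type int32KindEncoder struct{}

    func (int32KindEncoder) EncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
        return vw.WriteInt32(int32(val.Int()))
    }

    // MyInt has no exact-type or interface registration below, only a kind match.
    type MyInt int32

    func main() {
        reg := bsoncodec.NewRegistry()
        reg.RegisterKindEncoder(reflect.Int32, int32KindEncoder{})

        // Steps 1 and 2 miss; step 3 matches reflect.Int32.
        enc, err := reg.LookupEncoder(reflect.TypeOf(MyInt(0)))
        fmt.Println(enc != nil, err) // true <nil>
    }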
-func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { - if valueType == nil { - return nil, ErrNilType - } - dec, found := r.lookupTypeDecoder(valueType) - if found { - if dec == nil { - return nil, ErrNoDecoder{Type: valueType} - } - return dec, nil - } - - dec, found = r.lookupInterfaceDecoder(valueType, true) - if found { - return r.storeTypeDecoder(valueType, dec), nil - } - - if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { - return r.storeTypeDecoder(valueType, v), nil - } - return nil, ErrNoDecoder{Type: valueType} -} - -func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { - return r.typeDecoders.Load(valueType) -} - -func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder { - return r.typeDecoders.LoadOrStore(typ, dec) -} - -func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { - for _, idec := range r.interfaceDecoders { - if valueType.Implements(idec.i) { - return idec.vd, true - } - if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) { - // if *t implements an interface, this will catch if t implements an interface further - // ahead in interfaceDecoders - defaultDec, found := r.lookupInterfaceDecoder(valueType, false) - if !found { - defaultDec, _ = r.kindDecoders.Load(valueType.Kind()) - } - return newCondAddrDecoder(idec.vd, defaultDec), true - } - } - return nil, false -} - -// LookupTypeMapEntry inspects the registry's type map for the Go type corresponding to the given BSON -// type. If no type is found, ErrNoTypeMapEntry is returned. -// -// LookupTypeMapEntry should not be called concurrently with any other Registry method. -func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { - v, ok := r.typeMap.Load(bt) - if v == nil || !ok { - return nil, ErrNoTypeMapEntry{Type: bt} - } - return v.(reflect.Type), nil -} - -type interfaceValueEncoder struct { - i reflect.Type - ve ValueEncoder -} - -type interfaceValueDecoder struct { - i reflect.Type - vd ValueDecoder -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go deleted file mode 100644 index a43daf005f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var defaultSliceCodec = NewSliceCodec() - -// SliceCodec is the Codec used for slice values. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// SliceCodec registered. -type SliceCodec struct { - // EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of - // BSON null. - // - // Deprecated: Use bson.Encoder.NilSliceAsEmpty instead. - EncodeNilAsEmpty bool -} - -// NewSliceCodec returns a SliceCodec with options opts.
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// SliceCodec registered. -func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { - sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...) - - codec := SliceCodec{} - if sliceOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty - } - return &codec -} - -// EncodeValue is the ValueEncoder for slice types. -func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Slice { - return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty { - return vw.WriteNull() - } - - // If we have a []byte we want to treat it as a binary instead of as an array. - if val.Type().Elem() == tByte { - byteSlice := make([]byte, val.Len()) - reflect.Copy(reflect.ValueOf(byteSlice), val) - return vw.WriteBinary(byteSlice) - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. - if val.Type() == tD || val.Type().ConvertibleTo(tD) { - d := val.Convert(tD).Interface().(primitive.D) - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for _, e := range d { - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -// DecodeValue is the ValueDecoder for slice types. 
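The two special cases above ([]byte written as BSON binary, primitive.D written as a document) can be observed through the public bson package; a quick sketch, assuming the driver's default registry:

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/bson"
    )

    func main() {
        raw, err := bson.Marshal(bson.M{
            "blob": []byte{0x01, 0x02}, // []byte becomes a BSON binary
            "nums": []int{1, 2},        // other slices become BSON arrays
        })
        if err != nil {
            panic(err)
        }
        doc := bson.Raw(raw)
        fmt.Println(doc.Lookup("blob").Type, doc.Lookup("nums").Type) // binary array
    }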
-func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Slice { - return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Array: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - case bsontype.Binary: - if val.Type().Elem() != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType) - } - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) - } - val.SetLen(0) - val.Set(reflect.AppendSlice(val, reflect.ValueOf(data))) - return nil - case bsontype.String: - if sliceType := val.Type().Elem(); sliceType != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType) - } - str, err := vr.ReadString() - if err != nil { - return err - } - byteStr := []byte(str) - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) - } - val.SetLen(0) - val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr))) - return nil - default: - return fmt.Errorf("cannot decode %v into a slice", vrType) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - dc.Ancestor = val.Type() - elemsFunc = defaultValueDecoders.decodeD - default: - elemsFunc = defaultValueDecoders.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) - } - - val.SetLen(0) - val.Set(reflect.Append(val, elems...)) - - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go deleted file mode 100644 index ff931b7253..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// StringCodec is the Codec used for string values. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StringCodec registered. -type StringCodec struct { - // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. - // If false, a string made from the raw object ID bytes will be used. Defaults to true. 
- // - // Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. - DecodeObjectIDAsHex bool -} - -var ( - defaultStringCodec = NewStringCodec() - - // Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be - // used by collection type decoders (e.g. map, slice, etc) to set individual values in a - // collection. - _ typeDecoder = defaultStringCodec -) - -// NewStringCodec returns a StringCodec with options opts. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StringCodec registered. -func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { - stringOpt := bsonoptions.MergeStringCodecOptions(opts...) - return &StringCodec{*stringOpt.DecodeObjectIDAsHex} -} - -// EncodeValue is the ValueEncoder for string types. -func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.String { - return ValueEncoderError{ - Name: "StringEncodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: val, - } - } - - return vw.WriteString(val.String()) -} - -func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t.Kind() != reflect.String { - return emptyValue, ValueDecoderError{ - Name: "StringDecodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: reflect.Zero(t), - } - } - - var str string - var err error - switch vr.Type() { - case bsontype.String: - str, err = vr.ReadString() - if err != nil { - return emptyValue, err - } - case bsontype.ObjectID: - oid, err := vr.ReadObjectID() - if err != nil { - return emptyValue, err - } - if sc.DecodeObjectIDAsHex { - str = oid.Hex() - } else { - // TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string. - byteArray := [12]byte(oid) - str = string(byteArray[:]) - } - case bsontype.Symbol: - str, err = vr.ReadSymbol() - if err != nil { - return emptyValue, err - } - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return emptyValue, err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"} - } - str = string(data) - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type()) - } - - return reflect.ValueOf(str), nil -} - -// DecodeValue is the ValueDecoder for string types. -func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.String { - return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} - } - - elem, err := sc.decodeType(dctx, vr, val.Type()) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go deleted file mode 100644 index 4cde0a4d6b..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ /dev/null @@ -1,718 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strings" - "sync" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type. -type DecodeError struct { - keys []string - wrapped error -} - -// Unwrap returns the underlying error. -func (de *DecodeError) Unwrap() error { - return de.wrapped -} - -// Error implements the error interface. -func (de *DecodeError) Error() string { - // The keys are stored in reverse order because the de.keys slice is built up while propagating the error up the - // stack of BSON keys, so we call de.Keys(), which reverses them. - keyPath := strings.Join(de.Keys(), ".") - return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped) -} - -// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down -// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be -// a string, the keys slice will be ["a", "b", "c"]. -func (de *DecodeError) Keys() []string { - reversedKeys := make([]string, 0, len(de.keys)) - for idx := len(de.keys) - 1; idx >= 0; idx-- { - reversedKeys = append(reversedKeys, de.keys[idx]) - } - - return reversedKeys -} - -// Zeroer allows custom struct types to implement a report of zero -// state. All struct types that don't implement Zeroer or where IsZero -// returns false are considered to be not zero. -type Zeroer interface { - IsZero() bool -} - -// StructCodec is the Codec used for struct values. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StructCodec registered. -type StructCodec struct { - cache sync.Map // map[reflect.Type]*structDescription - parser StructTagParser - - // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the - // destination value passed to Decode before unmarshaling BSON documents into them. - // - // Deprecated: Use bson.Decoder.ZeroStructs instead. - DecodeZeroStruct bool - - // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the - // destination value passed to Decode before unmarshaling BSON documents into them. - // - // Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. - DecodeDeepZeroInline bool - - // EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g. - // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag - // option is set. - // - // Deprecated: Use bson.Encoder.OmitZeroStruct instead. - EncodeOmitDefaultStruct bool - - // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. - // - // Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be - // supported in Go Driver 2.0. - AllowUnexportedFields bool - - // OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is - // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The - // default value is true.
- // - // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates instead. - OverwriteDuplicatedInlinedFields bool -} - -var _ ValueEncoder = &StructCodec{} -var _ ValueDecoder = &StructCodec{} - -// NewStructCodec returns a StructCodec that uses p for struct tag parsing. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StructCodec registered. -func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { - if p == nil { - return nil, errors.New("a StructTagParser must be provided to NewStructCodec") - } - - structOpt := bsonoptions.MergeStructCodecOptions(opts...) - - codec := &StructCodec{ - parser: p, - } - - if structOpt.DecodeZeroStruct != nil { - codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct - } - if structOpt.DecodeDeepZeroInline != nil { - codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline - } - if structOpt.EncodeOmitDefaultStruct != nil { - codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct - } - if structOpt.OverwriteDuplicatedInlinedFields != nil { - codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields - } - if structOpt.AllowUnexportedFields != nil { - codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields - } - - return codec, nil -} - -// EncodeValue handles encoding generic struct types. -func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Struct { - return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} - } - - sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates) - if err != nil { - return err - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - var rv reflect.Value - for _, desc := range sd.fl { - if desc.inline == nil { - rv = val.Field(desc.idx) - } else { - rv, err = fieldByIndexErr(val, desc.inline) - if err != nil { - continue - } - } - - desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) - - if err != nil && err != errInvalidValue { - return err - } - - if err == errInvalidValue { - if desc.omitEmpty { - continue - } - vw2, err := dw.WriteDocumentElement(desc.name) - if err != nil { - return err - } - err = vw2.WriteNull() - if err != nil { - return err - } - continue - } - - if desc.encoder == nil { - return ErrNoEncoder{Type: rv.Type()} - } - - encoder := desc.encoder - - var zero bool - if cz, ok := encoder.(CodecZeroer); ok { - zero = cz.IsTypeZero(rv.Interface()) - } else if rv.Kind() == reflect.Interface { - // isZero will not treat an interface rv as an interface, so we need to check for the - // zero interface separately. 
- zero = rv.IsNil() - } else { - zero = isZero(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) - } - if desc.omitEmpty && zero { - continue - } - - vw2, err := dw.WriteDocumentElement(desc.name) - if err != nil { - return err - } - - ectx := EncodeContext{ - Registry: ec.Registry, - MinSize: desc.minSize || ec.MinSize, - errorOnInlineDuplicates: ec.errorOnInlineDuplicates, - stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt, - nilMapAsEmpty: ec.nilMapAsEmpty, - nilSliceAsEmpty: ec.nilSliceAsEmpty, - nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty, - omitZeroStruct: ec.omitZeroStruct, - useJSONStructTags: ec.useJSONStructTags, - } - err = encoder.EncodeValue(ectx, vw2, rv) - if err != nil { - return err - } - } - - if sd.inlineMap >= 0 { - rv := val.Field(sd.inlineMap) - collisionFn := func(key string) bool { - _, exists := sd.fm[key] - return exists - } - - return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn) - } - - return dw.WriteDocumentEnd() -} - -func newDecodeError(key string, original error) error { - de, ok := original.(*DecodeError) - if !ok { - return &DecodeError{ - keys: []string{key}, - wrapped: original, - } - } - - de.keys = append(de.keys, key) - return de -} - -// DecodeValue implements the Codec interface. -// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. -// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. -func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Struct { - return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - if err := vr.ReadNull(); err != nil { - return err - } - - val.Set(reflect.Zero(val.Type())) - return nil - case bsontype.Undefined: - if err := vr.ReadUndefined(); err != nil { - return err - } - - val.Set(reflect.Zero(val.Type())) - return nil - default: - return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) - } - - sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false) - if err != nil { - return err - } - - if sc.DecodeZeroStruct || dc.zeroStructs { - val.Set(reflect.Zero(val.Type())) - } - if sc.DecodeDeepZeroInline && sd.inline { - val.Set(deepZero(val.Type())) - } - - var decoder ValueDecoder - var inlineMap reflect.Value - if sd.inlineMap >= 0 { - inlineMap = val.Field(sd.inlineMap) - decoder, err = dc.LookupDecoder(inlineMap.Type().Elem()) - if err != nil { - return err - } - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - for { - name, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return err - } - - fd, exists := sd.fm[name] - if !exists { - // if the original name isn't found in the struct description, try again with the name in lowercase - // this could match if a BSON tag isn't specified because by default, describeStruct lowercases all field - // names - fd, exists = sd.fm[strings.ToLower(name)] - } - - if !exists { - if sd.inlineMap < 0 { - // The encoding/json package requires a flag to return on error for non-existent fields. - // This functionality seems appropriate for the struct codec. 
- err = vr.Skip() - if err != nil { - return err - } - continue - } - - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - - elem := reflect.New(inlineMap.Type().Elem()).Elem() - dc.Ancestor = inlineMap.Type() - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - inlineMap.SetMapIndex(reflect.ValueOf(name), elem) - continue - } - - var field reflect.Value - if fd.inline == nil { - field = val.Field(fd.idx) - } else { - field, err = getInlineField(val, fd.inline) - if err != nil { - return err - } - } - - if !field.CanSet() { // Being settable is a super set of being addressable. - innerErr := fmt.Errorf("field %v is not settable", field) - return newDecodeError(fd.name, innerErr) - } - if field.Kind() == reflect.Ptr && field.IsNil() { - field.Set(reflect.New(field.Type().Elem())) - } - field = field.Addr() - - dctx := DecodeContext{ - Registry: dc.Registry, - Truncate: fd.truncate || dc.Truncate, - defaultDocumentType: dc.defaultDocumentType, - binaryAsSlice: dc.binaryAsSlice, - useJSONStructTags: dc.useJSONStructTags, - useLocalTimeZone: dc.useLocalTimeZone, - zeroMaps: dc.zeroMaps, - zeroStructs: dc.zeroStructs, - } - - if fd.decoder == nil { - return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) - } - - err = fd.decoder.DecodeValue(dctx, vr, field.Elem()) - if err != nil { - return newDecodeError(fd.name, err) - } - } - - return nil -} - -func isZero(v reflect.Value, omitZeroStruct bool) bool { - kind := v.Kind() - if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { - return v.Interface().(Zeroer).IsZero() - } - if kind == reflect.Struct { - if !omitZeroStruct { - return false - } - vt := v.Type() - if vt == tTime { - return v.Interface().(time.Time).IsZero() - } - numField := vt.NumField() - for i := 0; i < numField; i++ { - ff := vt.Field(i) - if ff.PkgPath != "" && !ff.Anonymous { - continue // Private field - } - if !isZero(v.Field(i), omitZeroStruct) { - return false - } - } - return true - } - return !v.IsValid() || v.IsZero() -} - -type structDescription struct { - fm map[string]fieldDescription - fl []fieldDescription - inlineMap int - inline bool -} - -type fieldDescription struct { - name string // BSON key name - fieldName string // struct field name - idx int - omitEmpty bool - minSize bool - truncate bool - inline []int - encoder ValueEncoder - decoder ValueDecoder -} - -type byIndex []fieldDescription - -func (bi byIndex) Len() int { return len(bi) } - -func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] } - -func (bi byIndex) Less(i, j int) bool { - // If a field is inlined, its index in the top level struct is stored at inline[0] - iIdx, jIdx := bi[i].idx, bi[j].idx - if len(bi[i].inline) > 0 { - iIdx = bi[i].inline[0] - } - if len(bi[j].inline) > 0 { - jIdx = bi[j].inline[0] - } - if iIdx != jIdx { - return iIdx < jIdx - } - for k, biik := range bi[i].inline { - if k >= len(bi[j].inline) { - return false - } - if biik != bi[j].inline[k] { - return biik < bi[j].inline[k] - } - } - return len(bi[i].inline) < len(bi[j].inline) -} - -func (sc *StructCodec) describeStruct( - r *Registry, - t reflect.Type, - useJSONStructTags bool, - errorOnDuplicates bool, -) (*structDescription, error) { - // We need to analyze the struct, including getting the tags, collecting - // information about inlining, and create a map of the field name to the field. 
- if v, ok := sc.cache.Load(t); ok { - return v.(*structDescription), nil - } - // TODO(charlie): Only describe the struct once when called - // concurrently with the same type. - ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates) - if err != nil { - return nil, err - } - if v, loaded := sc.cache.LoadOrStore(t, ds); loaded { - ds = v.(*structDescription) - } - return ds, nil -} - -func (sc *StructCodec) describeStructSlow( - r *Registry, - t reflect.Type, - useJSONStructTags bool, - errorOnDuplicates bool, -) (*structDescription, error) { - numFields := t.NumField() - sd := &structDescription{ - fm: make(map[string]fieldDescription, numFields), - fl: make([]fieldDescription, 0, numFields), - inlineMap: -1, - } - - var fields []fieldDescription - for i := 0; i < numFields; i++ { - sf := t.Field(i) - if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) { - // field is private or unexported fields aren't allowed, ignore - continue - } - - sfType := sf.Type - encoder, err := r.LookupEncoder(sfType) - if err != nil { - encoder = nil - } - decoder, err := r.LookupDecoder(sfType) - if err != nil { - decoder = nil - } - - description := fieldDescription{ - fieldName: sf.Name, - idx: i, - encoder: encoder, - decoder: decoder, - } - - var stags StructTags - // If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser - // instead of the parser defined on the codec. - if useJSONStructTags { - stags, err = JSONFallbackStructTagParser.ParseStructTags(sf) - } else { - stags, err = sc.parser.ParseStructTags(sf) - } - if err != nil { - return nil, err - } - if stags.Skip { - continue - } - description.name = stags.Name - description.omitEmpty = stags.OmitEmpty - description.minSize = stags.MinSize - description.truncate = stags.Truncate - - if stags.Inline { - sd.inline = true - switch sfType.Kind() { - case reflect.Map: - if sd.inlineMap >= 0 { - return nil, errors.New("(struct " + t.String() + ") multiple inline maps") - } - if sfType.Key() != tString { - return nil, errors.New("(struct " + t.String() + ") inline map must have string keys") - } - sd.inlineMap = description.idx - case reflect.Ptr: - sfType = sfType.Elem() - if sfType.Kind() != reflect.Struct { - return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) - } - fallthrough - case reflect.Struct: - inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates) - if err != nil { - return nil, err - } - for _, fd := range inlinesf.fl { - if fd.inline == nil { - fd.inline = []int{i, fd.idx} - } else { - fd.inline = append([]int{i}, fd.inline...) - } - fields = append(fields, fd) - - } - default: - return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) - } - continue - } - fields = append(fields, description) - } - - // Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name - sort.Slice(fields, func(i, j int) bool { - x := fields - // sort field by name, breaking ties with depth, then - // breaking ties with index sequence. - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].inline) != len(x[j].inline) { - return len(x[i].inline) < len(x[j].inline) - } - return byIndex(x).Less(i, j) - }) - - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field.
- fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - sd.fl = append(sd.fl, fi) - sd.fm[name] = fi - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates { - return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) - } - sd.fl = append(sd.fl, dominant) - sd.fm[name] = dominant - } - - sort.Sort(byIndex(sd.fl)) - - return sd, nil -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's inlining rules. If there are multiple top-level -// fields, the boolean will be false: This condition is an error in Go -// and we skip all the fields. -func dominantField(fields []fieldDescription) (fieldDescription, bool) { - // The fields are sorted in increasing index-length order, then by presence of tag. - // That means that the first field is the dominant one. We need only check - // for error cases: two fields at top level. - if len(fields) > 1 && - len(fields[0].inline) == len(fields[1].inline) { - return fieldDescription{}, false - } - return fields[0], true -} - -func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) { - defer func() { - if recovered := recover(); recovered != nil { - switch r := recovered.(type) { - case string: - err = fmt.Errorf("%s", r) - case error: - err = r - } - } - }() - - result = v.FieldByIndex(index) - return -} - -func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { - field, err := fieldByIndexErr(val, index) - if err == nil { - return field, nil - } - - // if parent of this element doesn't exist, fix its parent - inlineParent := index[:len(index)-1] - var fParent reflect.Value - if fParent, err = fieldByIndexErr(val, inlineParent); err != nil { - fParent, err = getInlineField(val, inlineParent) - if err != nil { - return fParent, err - } - } - fParent.Set(reflect.New(fParent.Type().Elem())) - - return fieldByIndexErr(val, index) -} - -// DeepZero returns recursive zero object -func deepZero(st reflect.Type) (result reflect.Value) { - if st.Kind() == reflect.Struct { - numField := st.NumField() - for i := 0; i < numField; i++ { - if result == emptyValue { - result = reflect.Indirect(reflect.New(st)) - } - f := result.Field(i) - if f.CanInterface() { - if f.Type().Kind() == reflect.Struct { - result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem()))) - } - } - } - } - return result -} - -// recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside -func recursivePointerTo(v reflect.Value) reflect.Value { - v = reflect.Indirect(v) - result := reflect.New(v.Type()) - if v.Kind() == reflect.Struct { - for i := 0; i < v.NumField(); i++ { - if f := v.Field(i); f.Kind() == reflect.Ptr { - if f.Elem().Kind() == reflect.Struct { - result.Elem().Field(i).Set(recursivePointerTo(f)) - } - } - } - } - - return result -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go deleted file mode 100644 index 18d85bfb03..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - "strings" -) - -// StructTagParser returns the struct tags for a given struct field. -// -// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. -type StructTagParser interface { - ParseStructTags(reflect.StructField) (StructTags, error) -} - -// StructTagParserFunc is an adapter that allows a generic function to be used -// as a StructTagParser. -// -// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. -type StructTagParserFunc func(reflect.StructField) (StructTags, error) - -// ParseStructTags implements the StructTagParser interface. -func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) { - return stpf(sf) -} - -// StructTags represents the struct tag fields that the StructCodec uses during -// the encoding and decoding process. -// -// In the case of a struct, the lowercased field name is used as the key for each exported -// field, but this behavior may be changed using a struct tag. The tag may also contain flags to -// adjust the marshalling behavior for the field. -// -// The properties are defined below: -// -// OmitEmpty Only include the field if it's not set to the zero value for the type or to -// empty slices or maps. -// -// MinSize Marshal an integer of a type larger than 32 bits as an int32, if that's -// feasible while preserving the numeric value. -// -// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within -// a float32. -// -// Inline Inline the field, which must be a struct or a map, causing all of its fields -// or keys to be processed as if they were part of the outer struct. For maps, -// keys must not conflict with the bson keys of other struct fields. -// -// Skip This struct field should be skipped. This is usually denoted by parsing a "-" -// for the name. -// -// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. -type StructTags struct { - Name string - OmitEmpty bool - MinSize bool - Truncate bool - Inline bool - Skip bool -} - -// DefaultStructTagParser is the StructTagParser used by the StructCodec by default. -// It will handle the bson struct tag. See the documentation for StructTags to see -// what each of the returned fields means. -// -// If there is no name in the struct tag fields, the struct field name is lowercased. -// The tag formats accepted are: -// -// "[<key>][,<flag1>[,<flag2>]]" -// -// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)` -// -// An example: -// -// type T struct { -// A bool -// B int "myb" -// C string "myc,omitempty" -// D string `bson:",omitempty" json:"jsonkey"` -// E int64 ",minsize" -// F int64 "myf,omitempty,minsize" -// } -// -// A struct tag either consisting entirely of '-' or with a bson key with a -// value consisting entirely of '-' will return a StructTags with Skip true and -// the remaining fields will be their default values. -// -// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0.
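To make the flag syntax concrete, a small sketch that runs a field through this (deprecated) parser; Order is an illustrative struct:

    package main

    import (
        "fmt"
        "reflect"

        "go.mongodb.org/mongo-driver/bson/bsoncodec"
    )

    // Order's tag sets the key "total" plus two flags.
    type Order struct {
        Total int64 `bson:"total,omitempty,minsize"`
    }

    func main() {
        sf, _ := reflect.TypeOf(Order{}).FieldByName("Total")
        tags, err := bsoncodec.DefaultStructTagParser.ParseStructTags(sf)
        if err != nil {
            panic(err)
        }
        fmt.Println(tags.Name, tags.OmitEmpty, tags.MinSize) // total true true
    }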
-var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { - key := strings.ToLower(sf.Name) - tag, ok := sf.Tag.Lookup("bson") - if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { - tag = string(sf.Tag) - } - return parseTags(key, tag) -} - -func parseTags(key string, tag string) (StructTags, error) { - var st StructTags - if tag == "-" { - st.Skip = true - return st, nil - } - - for idx, str := range strings.Split(tag, ",") { - if idx == 0 && str != "" { - key = str - } - switch str { - case "omitempty": - st.OmitEmpty = true - case "minsize": - st.MinSize = true - case "truncate": - st.Truncate = true - case "inline": - st.Inline = true - } - } - - st.Name = key - - return st, nil -} - -// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser -// but will also fallback to parsing the json tag instead on a field where the -// bson tag isn't available. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and -// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. -var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { - key := strings.ToLower(sf.Name) - tag, ok := sf.Tag.Lookup("bson") - if !ok { - tag, ok = sf.Tag.Lookup("json") - } - if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { - tag = string(sf.Tag) - } - - return parseTags(key, tag) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go deleted file mode 100644 index 7b005a9958..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -const ( - timeFormatString = "2006-01-02T15:04:05.999Z07:00" -) - -// TimeCodec is the Codec used for time.Time values. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// TimeCodec registered. -type TimeCodec struct { - // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. - // - // Deprecated: Use bson.Decoder.UseLocalTimeZone instead. - UseLocalTimeZone bool -} - -var ( - defaultTimeCodec = NewTimeCodec() - - // Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used - // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. - _ typeDecoder = defaultTimeCodec -) - -// NewTimeCodec returns a TimeCodec with options opts. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// TimeCodec registered. -func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { - timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) 
- - codec := TimeCodec{} - if timeOpt.UseLocalTimeZone != nil { - codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone - } - return &codec -} - -func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tTime { - return emptyValue, ValueDecoderError{ - Name: "TimeDecodeValue", - Types: []reflect.Type{tTime}, - Received: reflect.Zero(t), - } - } - - var timeVal time.Time - switch vrType := vr.Type(); vrType { - case bsontype.DateTime: - dt, err := vr.ReadDateTime() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(dt/1000, dt%1000*1000000) - case bsontype.String: - // assume strings use the timeFormatString layout - timeStr, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - timeVal, err = time.Parse(timeFormatString, timeStr) - if err != nil { - return emptyValue, err - } - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(i64/1000, i64%1000*1000000) - case bsontype.Timestamp: - t, _, err := vr.ReadTimestamp() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(int64(t), 0) - case bsontype.Null: - if err := vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err := vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType) - } - - if !tc.UseLocalTimeZone && !dc.useLocalTimeZone { - timeVal = timeVal.UTC() - } - return reflect.ValueOf(timeVal), nil -} - -// DecodeValue is the ValueDecoderFunc for time.Time. -func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tTime { - return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} - } - - elem, err := tc.decodeType(dc, vr, tTime) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// EncodeValue is the ValueEncoderFunc for time.Time. -func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTime { - return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} - } - tt := val.Interface().(time.Time) - dt := primitive.NewDateTimeFromTime(tt) - return vw.WriteDateTime(int64(dt)) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go deleted file mode 100644 index 6ade17b7d3..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License.
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "net/url" - "reflect" - "time" - - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var tBool = reflect.TypeOf(false) -var tFloat64 = reflect.TypeOf(float64(0)) -var tInt32 = reflect.TypeOf(int32(0)) -var tInt64 = reflect.TypeOf(int64(0)) -var tString = reflect.TypeOf("") -var tTime = reflect.TypeOf(time.Time{}) - -var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() -var tByteSlice = reflect.TypeOf([]byte(nil)) -var tByte = reflect.TypeOf(byte(0x00)) -var tURL = reflect.TypeOf(url.URL{}) -var tJSONNumber = reflect.TypeOf(json.Number("")) - -var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem() -var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() -var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() -var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() -var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem() - -var tBinary = reflect.TypeOf(primitive.Binary{}) -var tUndefined = reflect.TypeOf(primitive.Undefined{}) -var tOID = reflect.TypeOf(primitive.ObjectID{}) -var tDateTime = reflect.TypeOf(primitive.DateTime(0)) -var tNull = reflect.TypeOf(primitive.Null{}) -var tRegex = reflect.TypeOf(primitive.Regex{}) -var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) -var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) -var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) -var tSymbol = reflect.TypeOf(primitive.Symbol("")) -var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) -var tDecimal = reflect.TypeOf(primitive.Decimal128{}) -var tMinKey = reflect.TypeOf(primitive.MinKey{}) -var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) -var tD = reflect.TypeOf(primitive.D{}) -var tA = reflect.TypeOf(primitive.A{}) -var tE = reflect.TypeOf(primitive.E{}) - -var tCoreDocument = reflect.TypeOf(bsoncore.Document{}) -var tCoreArray = reflect.TypeOf(bsoncore.Array{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go deleted file mode 100644 index 7eb1069050..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "math" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// UIntCodec is the Codec used for uint values. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// UIntCodec registered. -type UIntCodec struct { - // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the - // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. - // - // Deprecated: Use bson.Encoder.IntMinSize instead. - EncodeToMinSize bool -} - -var ( - defaultUIntCodec = NewUIntCodec() - - // Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used - // by collection type decoders (e.g. 
map, slice, etc) to set individual values in a collection. - _ typeDecoder = defaultUIntCodec -) - -// NewUIntCodec returns a UIntCodec with options opts. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// UIntCodec registered. -func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { - uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) - - codec := UIntCodec{} - if uintOpt.EncodeToMinSize != nil { - codec.EncodeToMinSize = *uintOpt.EncodeToMinSize - } - return &codec -} - -// EncodeValue is the ValueEncoder for uint types. -func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Uint8, reflect.Uint16: - return vw.WriteInt32(int32(val.Uint())) - case reflect.Uint, reflect.Uint32, reflect.Uint64: - u64 := val.Uint() - - // If ec.MinSize or if encodeToMinSize is true for a non-uint64 value we should write val as an int32 - useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64) - - if u64 <= math.MaxInt32 && useMinSize { - return vw.WriteInt32(int32(u64)) - } - if u64 > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", u64) - } - return vw.WriteInt64(int64(u64)) - } - - return ValueEncoderError{ - Name: "UintEncodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } -} - -func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var i64 int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return emptyValue, err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return emptyValue, errCannotTruncate - } - if f64 > float64(math.MaxInt64) { - return emptyValue, fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - i64 = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) - } - - switch t.Kind() { - case reflect.Uint8: - if i64 < 0 || i64 > math.MaxUint8 { - return emptyValue, fmt.Errorf("%d overflows uint8", i64) - } - - return reflect.ValueOf(uint8(i64)), nil - case reflect.Uint16: - if i64 < 0 || i64 > math.MaxUint16 { - return emptyValue, fmt.Errorf("%d overflows uint16", i64) - } - - return reflect.ValueOf(uint16(i64)), nil - case reflect.Uint32: - if i64 < 0 || i64 > math.MaxUint32 { - return emptyValue, fmt.Errorf("%d overflows uint32", i64) - } - - return reflect.ValueOf(uint32(i64)), nil - case reflect.Uint64: - if i64 < 0 { - return emptyValue, fmt.Errorf("%d overflows uint64", i64) - } - - return reflect.ValueOf(uint64(i64)), nil - case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint - return emptyValue, fmt.Errorf("%d overflows uint", i64) - } - - return reflect.ValueOf(uint(i64)), nil - default: - return emptyValue, ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: 
[]reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: reflect.Zero(t), - } - } -} - -// DecodeValue is the ValueDecoder for uint types. -func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - elem, err := uic.decodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.SetUint(elem.Uint()) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go deleted file mode 100644 index 996bd17127..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type ByteSliceCodecOptions struct { - EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. -} - -// ByteSliceCodec creates a new *ByteSliceCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func ByteSliceCodec() *ByteSliceCodecOptions { - return &ByteSliceCodecOptions{} -} - -// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. -func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { - bs.EncodeNilAsEmpty = &b - return bs -} - -// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions { - bs := ByteSliceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeNilAsEmpty != nil { - bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - } - - return bs -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go deleted file mode 100644 index c40973c8d4..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2022-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsonoptions defines the optional configurations for the BSON codecs. 
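The Merge* helpers in this package all follow the same last-one-wins pattern shown in MergeByteSliceCodecOptions above; a brief sketch:

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/bson/bsonoptions"
    )

    func main() {
        older := bsonoptions.ByteSliceCodec().SetEncodeNilAsEmpty(false)
        newer := bsonoptions.ByteSliceCodec().SetEncodeNilAsEmpty(true)

        // Later options win wherever their fields are non-nil.
        merged := bsonoptions.MergeByteSliceCodecOptions(older, newer)
        fmt.Println(*merged.EncodeNilAsEmpty) // true
    }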
-package bsonoptions diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go deleted file mode 100644 index f522c7e03f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type EmptyInterfaceCodecOptions struct { - DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false. -} - -// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions { - return &EmptyInterfaceCodecOptions{} -} - -// SetDecodeBinaryAsSlice specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. -func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions { - e.DecodeBinaryAsSlice = &b - return e -} - -// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { - e := EmptyInterfaceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeBinaryAsSlice != nil { - e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice - } - } - - return e -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go deleted file mode 100644 index a7a7c1d980..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// MapCodecOptions represents all possible options for map encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type MapCodecOptions struct { - DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. - EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false.
- // Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must - // either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a - // string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the - // encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override - // TextMarshaler/TextUnmarshaler. Defaults to false. - EncodeKeysWithStringer *bool -} - -// MapCodec creates a new *MapCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func MapCodec() *MapCodecOptions { - return &MapCodecOptions{} -} - -// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. -func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { - t.DecodeZerosMap = &b - return t -} - -// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. -func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { - t.EncodeNilAsEmpty = &b - return t -} - -// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the -// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key -// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with -// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer -// will override TextMarshaler/TextUnmarshaler. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. -func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { - t.EncodeKeysWithStringer = &b - return t -} - -// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { - s := MapCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeZerosMap != nil { - s.DecodeZerosMap = opt.DecodeZerosMap - } - if opt.EncodeNilAsEmpty != nil { - s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - if opt.EncodeKeysWithStringer != nil { - s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go deleted file mode 100644 index 3c1e4f35ba..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// SliceCodecOptions represents all possible options for slice encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type SliceCodecOptions struct { - EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. -} - -// SliceCodec creates a new *SliceCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func SliceCodec() *SliceCodecOptions { - return &SliceCodecOptions{} -} - -// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. -func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { - s.EncodeNilAsEmpty = &b - return s -} - -// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { - s := SliceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeNilAsEmpty != nil { - s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go deleted file mode 100644 index f8b76f996e..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -var defaultDecodeOIDAsHex = true - -// StringCodecOptions represents all possible options for string encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type StringCodecOptions struct { - DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true. -} - -// StringCodec creates a new *StringCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func StringCodec() *StringCodecOptions { - return &StringCodecOptions{} -} - -// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made -// from the raw object ID bytes will be used. Defaults to true. -// -// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. -func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions { - t.DecodeObjectIDAsHex = &b - return t -} - -// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion. 
-// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions { - s := &StringCodecOptions{&defaultDecodeOIDAsHex} - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeObjectIDAsHex != nil { - s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go deleted file mode 100644 index 1cbfa32e8b..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -var defaultOverwriteDuplicatedInlinedFields = true - -// StructCodecOptions represents all possible options for struct encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type StructCodecOptions struct { - DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. - DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when a inline value is decoded. Defaults to false. - EncodeOmitDefaultStruct *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false. - AllowUnexportedFields *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. - OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true. -} - -// StructCodec creates a new *StructCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func StructCodec() *StructCodecOptions { - return &StructCodecOptions{} -} - -// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. -func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions { - t.DecodeZeroStruct = &b - return t -} - -// SetDecodeDeepZeroInline specifies if structs should be zeroed before decoding into them. Defaults to false. -// -// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. -func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions { - t.DecodeDeepZeroInline = &b - return t -} - -// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all -// its values set to their default value. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. -func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions { - t.EncodeOmitDefaultStruct = &b - return t -} - -// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the -// same bson key. 
When true and decoding, values will be written to the outermost struct with a matching key, and when -// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if -// there are duplicate keys after the struct is inlined. Defaults to true. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. -func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions { - t.OverwriteDuplicatedInlinedFields = &b - return t -} - -// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. -// -// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be -// supported in Go Driver 2.0. -func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions { - t.AllowUnexportedFields = &b - return t -} - -// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { - s := &StructCodecOptions{ - OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, - } - for _, opt := range opts { - if opt == nil { - continue - } - - if opt.DecodeZeroStruct != nil { - s.DecodeZeroStruct = opt.DecodeZeroStruct - } - if opt.DecodeDeepZeroInline != nil { - s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline - } - if opt.EncodeOmitDefaultStruct != nil { - s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct - } - if opt.OverwriteDuplicatedInlinedFields != nil { - s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields - } - if opt.AllowUnexportedFields != nil { - s.AllowUnexportedFields = opt.AllowUnexportedFields - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go deleted file mode 100644 index 3f38433d22..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// TimeCodecOptions represents all possible options for time.Time encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type TimeCodecOptions struct { - UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. -} - -// TimeCodec creates a new *TimeCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func TimeCodec() *TimeCodecOptions { - return &TimeCodecOptions{} -} - -// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. 
-func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { - t.UseLocalTimeZone = &b - return t -} - -// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { - t := TimeCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.UseLocalTimeZone != nil { - t.UseLocalTimeZone = opt.UseLocalTimeZone - } - } - - return t -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go deleted file mode 100644 index 5091e4d963..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// UIntCodecOptions represents all possible options for uint encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type UIntCodecOptions struct { - EncodeToMinSize *bool // Specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. -} - -// UIntCodec creates a new *UIntCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func UIntCodec() *UIntCodecOptions { - return &UIntCodecOptions{} -} - -// SetEncodeToMinSize specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead. -func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions { - u.EncodeToMinSize = &b - return u -} - -// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions { - u := UIntCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeToMinSize != nil { - u.EncodeToMinSize = opt.EncodeToMinSize - } - } - - return u -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go deleted file mode 100644 index 4d279b7fee..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ /dev/null @@ -1,488 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "fmt" - "io" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// Copier is a type that allows copying between ValueReaders, ValueWriters, and -// []byte values. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -type Copier struct{} - -// NewCopier creates a new copier with the given registry. If a nil registry is provided -// a default registry is used. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func NewCopier() Copier { - return Copier{} -} - -// CopyDocument handles copying a document from src to dst. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func CopyDocument(dst ValueWriter, src ValueReader) error { - return Copier{}.CopyDocument(dst, src) -} - -// CopyDocument handles copying one document from the src to the dst. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { - dr, err := src.ReadDocument() - if err != nil { - return err - } - - dw, err := dst.WriteDocument() - if err != nil { - return err - } - - return c.copyDocumentCore(dw, dr) -} - -// CopyArrayFromBytes copies the values from a BSON array represented as a -// []byte to a ValueWriter. -// -// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { - aw, err := dst.WriteArray() - if err != nil { - return err - } - - err = c.CopyBytesToArrayWriter(aw, src) - if err != nil { - return err - } - - return aw.WriteArrayEnd() -} - -// CopyDocumentFromBytes copies the values from a BSON document represented as a -// []byte to a ValueWriter. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { - dw, err := dst.WriteDocument() - if err != nil { - return err - } - - err = c.CopyBytesToDocumentWriter(dw, src) - if err != nil { - return err - } - - return dw.WriteDocumentEnd() -} - -type writeElementFn func(key string) (ValueWriter, error) - -// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an -// ArrayWriter. -// -// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go -// Driver 2.0. -func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { - wef := func(_ string) (ValueWriter, error) { - return dst.WriteArrayElement() - } - - return c.copyBytesToValueWriter(src, wef) -} - -// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a -// DocumentWriter. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. 
-func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { - wef := func(key string) (ValueWriter, error) { - return dst.WriteDocumentElement(key) - } - - return c.copyBytesToValueWriter(src, wef) -} - -func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { - // TODO(skriptble): Create errors types here. Anything that is a tag should be a property. - length, rem, ok := bsoncore.ReadLength(src) - if !ok { - return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) - } - if len(src) < int(length) { - return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length) - } - rem = rem[:length-4] - - var t bsontype.Type - var key string - var val bsoncore.Value - for { - t, rem, ok = bsoncore.ReadType(rem) - if !ok { - return io.EOF - } - if t == bsontype.Type(0) { - if len(rem) != 0 { - return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem) - } - break - } - - key, rem, ok = bsoncore.ReadKey(rem) - if !ok { - return fmt.Errorf("invalid key found. remaining bytes=%v", rem) - } - - // write as either array element or document element using writeElementFn - vw, err := wef(key) - if err != nil { - return err - } - - val, rem, ok = bsoncore.ReadValue(rem, t) - if !ok { - return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t) - } - err = c.CopyValueFromBytes(vw, t, val.Data) - if err != nil { - return err - } - } - return nil -} - -// CopyDocumentToBytes copies an entire document from the ValueReader and -// returns it as bytes. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { - return c.AppendDocumentBytes(nil, src) -} - -// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will -// append the result to dst. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { - if br, ok := src.(BytesReader); ok { - _, dst, err := br.ReadValueBytes(dst) - return dst, err - } - - vw := vwPool.Get().(*valueWriter) - defer putValueWriter(vw) - - vw.reset(dst) - - err := c.CopyDocument(vw, src) - dst = vw.buf - return dst, err -} - -// AppendArrayBytes copies an array from the ValueReader to dst. -// -// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { - if br, ok := src.(BytesReader); ok { - _, dst, err := br.ReadValueBytes(dst) - return dst, err - } - - vw := vwPool.Get().(*valueWriter) - defer putValueWriter(vw) - - vw.reset(dst) - - err := c.copyArray(vw, src) - dst = vw.buf - return dst, err -} - -// CopyValueFromBytes will write the value represtend by t and src to dst. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead. 
-func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { - if wvb, ok := dst.(BytesWriter); ok { - return wvb.WriteValueBytes(t, src) - } - - vr := vrPool.Get().(*valueReader) - defer vrPool.Put(vr) - - vr.reset(src) - vr.pushElement(t) - - return c.CopyValue(dst, vr) -} - -// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a -// []byte. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead. -func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { - return c.AppendValueBytes(nil, src) -} - -// AppendValueBytes functions the same as CopyValueToBytes, but will append the -// result to dst. -// -// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go -// Driver 2.0. -func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { - if br, ok := src.(BytesReader); ok { - return br.ReadValueBytes(dst) - } - - vw := vwPool.Get().(*valueWriter) - defer putValueWriter(vw) - - start := len(dst) - - vw.reset(dst) - vw.push(mElement) - - err := c.CopyValue(vw, src) - if err != nil { - return 0, dst, err - } - - return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil -} - -// CopyValue will copy a single value from src to dst. -// -// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { - var err error - switch src.Type() { - case bsontype.Double: - var f64 float64 - f64, err = src.ReadDouble() - if err != nil { - break - } - err = dst.WriteDouble(f64) - case bsontype.String: - var str string - str, err = src.ReadString() - if err != nil { - return err - } - err = dst.WriteString(str) - case bsontype.EmbeddedDocument: - err = c.CopyDocument(dst, src) - case bsontype.Array: - err = c.copyArray(dst, src) - case bsontype.Binary: - var data []byte - var subtype byte - data, subtype, err = src.ReadBinary() - if err != nil { - break - } - err = dst.WriteBinaryWithSubtype(data, subtype) - case bsontype.Undefined: - err = src.ReadUndefined() - if err != nil { - break - } - err = dst.WriteUndefined() - case bsontype.ObjectID: - var oid primitive.ObjectID - oid, err = src.ReadObjectID() - if err != nil { - break - } - err = dst.WriteObjectID(oid) - case bsontype.Boolean: - var b bool - b, err = src.ReadBoolean() - if err != nil { - break - } - err = dst.WriteBoolean(b) - case bsontype.DateTime: - var dt int64 - dt, err = src.ReadDateTime() - if err != nil { - break - } - err = dst.WriteDateTime(dt) - case bsontype.Null: - err = src.ReadNull() - if err != nil { - break - } - err = dst.WriteNull() - case bsontype.Regex: - var pattern, options string - pattern, options, err = src.ReadRegex() - if err != nil { - break - } - err = dst.WriteRegex(pattern, options) - case bsontype.DBPointer: - var ns string - var pointer primitive.ObjectID - ns, pointer, err = src.ReadDBPointer() - if err != nil { - break - } - err = dst.WriteDBPointer(ns, pointer) - case bsontype.JavaScript: - var js string - js, err = src.ReadJavascript() - if err != nil { - break - } - err = dst.WriteJavascript(js) - case bsontype.Symbol: - var symbol string - symbol, err = src.ReadSymbol() - if err != nil { - break - } - err = dst.WriteSymbol(symbol) - case bsontype.CodeWithScope: - var code string - var srcScope DocumentReader - code, srcScope, err = src.ReadCodeWithScope() - if err != nil { - break - } - - var 
dstScope DocumentWriter - dstScope, err = dst.WriteCodeWithScope(code) - if err != nil { - break - } - err = c.copyDocumentCore(dstScope, srcScope) - case bsontype.Int32: - var i32 int32 - i32, err = src.ReadInt32() - if err != nil { - break - } - err = dst.WriteInt32(i32) - case bsontype.Timestamp: - var t, i uint32 - t, i, err = src.ReadTimestamp() - if err != nil { - break - } - err = dst.WriteTimestamp(t, i) - case bsontype.Int64: - var i64 int64 - i64, err = src.ReadInt64() - if err != nil { - break - } - err = dst.WriteInt64(i64) - case bsontype.Decimal128: - var d128 primitive.Decimal128 - d128, err = src.ReadDecimal128() - if err != nil { - break - } - err = dst.WriteDecimal128(d128) - case bsontype.MinKey: - err = src.ReadMinKey() - if err != nil { - break - } - err = dst.WriteMinKey() - case bsontype.MaxKey: - err = src.ReadMaxKey() - if err != nil { - break - } - err = dst.WriteMaxKey() - default: - err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type()) - } - - return err -} - -func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { - ar, err := src.ReadArray() - if err != nil { - return err - } - - aw, err := dst.WriteArray() - if err != nil { - return err - } - - for { - vr, err := ar.ReadValue() - if err == ErrEOA { - break - } - if err != nil { - return err - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - err = c.CopyValue(vw, vr) - if err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { - for { - key, vr, err := dr.ReadElement() - if err == ErrEOD { - break - } - if err != nil { - return err - } - - vw, err := dw.WriteDocumentElement(key) - if err != nil { - return err - } - - err = c.CopyValue(vw, vr) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go deleted file mode 100644 index 750b0d2af5..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsonrw contains abstractions for reading and writing -// BSON and BSON like types from sources. -package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw" diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go deleted file mode 100644 index 54c76bf746..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -const maxNestingDepth = 200 - -// ErrInvalidJSON indicates the JSON input is invalid -var ErrInvalidJSON = errors.New("invalid JSON input") - -type jsonParseState byte - -const ( - jpsStartState jsonParseState = iota - jpsSawBeginObject - jpsSawEndObject - jpsSawBeginArray - jpsSawEndArray - jpsSawColon - jpsSawComma - jpsSawKey - jpsSawValue - jpsDoneState - jpsInvalidState -) - -type jsonParseMode byte - -const ( - jpmInvalidMode jsonParseMode = iota - jpmObjectMode - jpmArrayMode -) - -type extJSONValue struct { - t bsontype.Type - v interface{} -} - -type extJSONObject struct { - keys []string - values []*extJSONValue -} - -type extJSONParser struct { - js *jsonScanner - s jsonParseState - m []jsonParseMode - k string - v *extJSONValue - - err error - canonical bool - depth int - maxDepth int - - emptyObject bool - relaxedUUID bool -} - -// newExtJSONParser returns a new extended JSON parser, ready to to begin -// parsing from the first character of the argued json input. It will not -// perform any read-ahead and will therefore not report any errors about -// malformed JSON at this point. -func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser { - return &extJSONParser{ - js: &jsonScanner{r: r}, - s: jpsStartState, - m: []jsonParseMode{}, - canonical: canonical, - maxDepth: maxNestingDepth, - } -} - -// peekType examines the next value and returns its BSON Type -func (ejp *extJSONParser) peekType() (bsontype.Type, error) { - var t bsontype.Type - var err error - initialState := ejp.s - - ejp.advanceState() - switch ejp.s { - case jpsSawValue: - t = ejp.v.t - case jpsSawBeginArray: - t = bsontype.Array - case jpsInvalidState: - err = ejp.err - case jpsSawComma: - // in array mode, seeing a comma means we need to progress again to actually observe a type - if ejp.peekMode() == jpmArrayMode { - return ejp.peekType() - } - case jpsSawEndArray: - // this would only be a valid state if we were in array mode, so return end-of-array error - err = ErrEOA - case jpsSawBeginObject: - // peek key to determine type - ejp.advanceState() - switch ejp.s { - case jpsSawEndObject: // empty embedded document - t = bsontype.EmbeddedDocument - ejp.emptyObject = true - case jpsInvalidState: - err = ejp.err - case jpsSawKey: - if initialState == jpsStartState { - return bsontype.EmbeddedDocument, nil - } - t = wrapperKeyBSONType(ejp.k) - - // if $uuid is encountered, parse as binary subtype 4 - if ejp.k == "$uuid" { - ejp.relaxedUUID = true - t = bsontype.Binary - } - - switch t { - case bsontype.JavaScript: - // just saw $code, need to check for $scope at same level - _, err = ejp.readValue(bsontype.JavaScript) - if err != nil { - break - } - - switch ejp.s { - case jpsSawEndObject: // type is TypeJavaScript - case jpsSawComma: - ejp.advanceState() - - if ejp.s == jpsSawKey && ejp.k == "$scope" { - t = bsontype.CodeWithScope - } else { - err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k) - } - case jpsInvalidState: - err = ejp.err - default: - err = ErrInvalidJSON - } - case bsontype.CodeWithScope: - err = errors.New("invalid extended JSON: code with $scope must contain $code before $scope") - } - } - } - - return t, err -} - -// readKey parses the next key and its type and returns them -func (ejp *extJSONParser) 
readKey() (string, bsontype.Type, error) { - if ejp.emptyObject { - ejp.emptyObject = false - return "", 0, ErrEOD - } - - // advance to key (or return with error) - switch ejp.s { - case jpsStartState: - ejp.advanceState() - if ejp.s == jpsSawBeginObject { - ejp.advanceState() - } - case jpsSawBeginObject: - ejp.advanceState() - case jpsSawValue, jpsSawEndObject, jpsSawEndArray: - ejp.advanceState() - switch ejp.s { - case jpsSawBeginObject, jpsSawComma: - ejp.advanceState() - case jpsSawEndObject: - return "", 0, ErrEOD - case jpsDoneState: - return "", 0, io.EOF - case jpsInvalidState: - return "", 0, ejp.err - default: - return "", 0, ErrInvalidJSON - } - case jpsSawKey: // do nothing (key was peeked before) - default: - return "", 0, invalidRequestError("key") - } - - // read key - var key string - - switch ejp.s { - case jpsSawKey: - key = ejp.k - case jpsSawEndObject: - return "", 0, ErrEOD - case jpsInvalidState: - return "", 0, ejp.err - default: - return "", 0, invalidRequestError("key") - } - - // check for colon - ejp.advanceState() - if err := ensureColon(ejp.s, key); err != nil { - return "", 0, err - } - - // peek at the value to determine type - t, err := ejp.peekType() - if err != nil { - return "", 0, err - } - - return key, t, nil -} - -// readValue returns the value corresponding to the Type returned by peekType -func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { - if ejp.s == jpsInvalidState { - return nil, ejp.err - } - - var v *extJSONValue - - switch t { - case bsontype.Null, bsontype.Boolean, bsontype.String: - if ejp.s != jpsSawValue { - return nil, invalidRequestError(t.String()) - } - v = ejp.v - case bsontype.Int32, bsontype.Int64, bsontype.Double: - // relaxed version allows these to be literal number values - if ejp.s == jpsSawValue { - v = ejp.v - break - } - fallthrough - case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined: - switch ejp.s { - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read value - ejp.advanceState() - if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) { - return nil, invalidJSONErrorForType("value", t) - } - - v = ejp.v - - // read end object - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("} after value", t) - } - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer: - if ejp.s != jpsSawKey { - return nil, invalidRequestError(t.String()) - } - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - ejp.advanceState() - if t == bsontype.Binary && ejp.s == jpsSawValue { - // convert relaxed $uuid format - if ejp.relaxedUUID { - defer func() { ejp.relaxedUUID = false }() - uuid, err := ejp.v.parseSymbol() - if err != nil { - return nil, err - } - - // RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as appearing - // in the 8th, 13th, 18th, and 23rd characters. 
- // - // See https://tools.ietf.org/html/rfc4122#section-3 - valid := len(uuid) == 36 && - string(uuid[8]) == "-" && - string(uuid[13]) == "-" && - string(uuid[18]) == "-" && - string(uuid[23]) == "-" - if !valid { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") - } - - // remove hyphens - uuidNoHyphens := strings.Replace(uuid, "-", "", -1) - if len(uuidNoHyphens) != 32 { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") - } - - // convert hex to bytes - bytes, err := hex.DecodeString(uuidNoHyphens) - if err != nil { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %v", err) - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("$uuid and value and then }", bsontype.Binary) - } - - base64 := &extJSONValue{ - t: bsontype.String, - v: base64.StdEncoding.EncodeToString(bytes), - } - subType := &extJSONValue{ - t: bsontype.String, - v: "04", - } - - v = &extJSONValue{ - t: bsontype.EmbeddedDocument, - v: &extJSONObject{ - keys: []string{"base64", "subType"}, - values: []*extJSONValue{base64, subType}, - }, - } - - break - } - - // convert legacy $binary format - base64 := ejp.v - - ejp.advanceState() - if ejp.s != jpsSawComma { - return nil, invalidJSONErrorForType(",", bsontype.Binary) - } - - ejp.advanceState() - key, t, err := ejp.readKey() - if err != nil { - return nil, err - } - if key != "$type" { - return nil, invalidJSONErrorForType("$type", bsontype.Binary) - } - - subType, err := ejp.readValue(t) - if err != nil { - return nil, err - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary) - } - - v = &extJSONValue{ - t: bsontype.EmbeddedDocument, - v: &extJSONObject{ - keys: []string{"base64", "subType"}, - values: []*extJSONValue{base64, subType}, - }, - } - break - } - - // read KV pairs - if ejp.s != jpsSawBeginObject { - return nil, invalidJSONErrorForType("{", t) - } - - keys, vals, err := ejp.readObject(2, true) - if err != nil { - return nil, err - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("2 key-value pairs and then }", t) - } - - v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} - - case bsontype.DateTime: - switch ejp.s { - case jpsSawValue: - v = ejp.v - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - ejp.advanceState() - switch ejp.s { - case jpsSawBeginObject: - keys, vals, err := ejp.readObject(1, true) - if err != nil { - return nil, err - } - v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} - case jpsSawValue: - if ejp.canonical { - return nil, invalidJSONError("{") - } - v = ejp.v - default: - if ejp.canonical { - return nil, invalidJSONErrorForType("object", t) - } - return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("value and then }", t) - } - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.JavaScript: - switch ejp.s { - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read value - ejp.advanceState() - if ejp.s != 
jpsSawValue { - return nil, invalidJSONErrorForType("value", t) - } - v = ejp.v - - // read end object or comma and just return - ejp.advanceState() - case jpsSawEndObject: - v = ejp.v - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.CodeWithScope: - if ejp.s == jpsSawKey && ejp.k == "$scope" { - v = ejp.v // this is the $code string from earlier - - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read { - ejp.advanceState() - if ejp.s != jpsSawBeginObject { - return nil, invalidJSONError("$scope to be embedded document") - } - } else { - return nil, invalidRequestError(t.String()) - } - case bsontype.EmbeddedDocument, bsontype.Array: - return nil, invalidRequestError(t.String()) - } - - return v, nil -} - -// readObject is a utility method for reading full objects of known (or expected) size -// it is useful for extended JSON types such as binary, datetime, regex, and timestamp -func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) { - keys := make([]string, numKeys) - vals := make([]*extJSONValue, numKeys) - - if !started { - ejp.advanceState() - if ejp.s != jpsSawBeginObject { - return nil, nil, invalidJSONError("{") - } - } - - for i := 0; i < numKeys; i++ { - key, t, err := ejp.readKey() - if err != nil { - return nil, nil, err - } - - switch ejp.s { - case jpsSawKey: - v, err := ejp.readValue(t) - if err != nil { - return nil, nil, err - } - - keys[i] = key - vals[i] = v - case jpsSawValue: - keys[i] = key - vals[i] = ejp.v - default: - return nil, nil, invalidJSONError("value") - } - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, nil, invalidJSONError("}") - } - - return keys, vals, nil -} - -// advanceState reads the next JSON token from the scanner and transitions -// from the current state based on that token's type -func (ejp *extJSONParser) advanceState() { - if ejp.s == jpsDoneState || ejp.s == jpsInvalidState { - return - } - - jt, err := ejp.js.nextToken() - - if err != nil { - ejp.err = err - ejp.s = jpsInvalidState - return - } - - valid := ejp.validateToken(jt.t) - if !valid { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - return - } - - switch jt.t { - case jttBeginObject: - ejp.s = jpsSawBeginObject - ejp.pushMode(jpmObjectMode) - ejp.depth++ - - if ejp.depth > ejp.maxDepth { - ejp.err = nestingDepthError(jt.p, ejp.depth) - ejp.s = jpsInvalidState - } - case jttEndObject: - ejp.s = jpsSawEndObject - ejp.depth-- - - if ejp.popMode() != jpmObjectMode { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttBeginArray: - ejp.s = jpsSawBeginArray - ejp.pushMode(jpmArrayMode) - case jttEndArray: - ejp.s = jpsSawEndArray - - if ejp.popMode() != jpmArrayMode { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttColon: - ejp.s = jpsSawColon - case jttComma: - ejp.s = jpsSawComma - case jttEOF: - ejp.s = jpsDoneState - if len(ejp.m) != 0 { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttString: - switch ejp.s { - case jpsSawComma: - if ejp.peekMode() == jpmArrayMode { - ejp.s = jpsSawValue - ejp.v = extendJSONToken(jt) - return - } - fallthrough - case jpsSawBeginObject: - ejp.s = jpsSawKey - ejp.k = jt.v.(string) - return - } - fallthrough - default: - ejp.s = jpsSawValue - ejp.v = extendJSONToken(jt) - } -} - -var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{ - jpsStartState: { - jttBeginObject: 
true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - jttEOF: true, - }, - jpsSawBeginObject: { - jttEndObject: true, - jttString: true, - }, - jpsSawEndObject: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsSawBeginArray: { - jttBeginObject: true, - jttBeginArray: true, - jttEndArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawEndArray: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsSawColon: { - jttBeginObject: true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawComma: { - jttBeginObject: true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawKey: { - jttColon: true, - }, - jpsSawValue: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsDoneState: {}, - jpsInvalidState: {}, -} - -func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool { - switch ejp.s { - case jpsSawEndObject: - // if we are at depth zero and the next token is a '{', - // we can consider it valid only if we are not in array mode. - if jtt == jttBeginObject && ejp.depth == 0 { - return ejp.peekMode() != jpmArrayMode - } - case jpsSawComma: - switch ejp.peekMode() { - // the only valid next token after a comma inside a document is a string (a key) - case jpmObjectMode: - return jtt == jttString - case jpmInvalidMode: - return false - } - } - - _, ok := jpsValidTransitionTokens[ejp.s][jtt] - return ok -} - -// ensureExtValueType returns true if the current value has the expected -// value type for single-key extended JSON types. 
For example, -// {"$numberInt": v} v must be TypeString -func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool { - switch t { - case bsontype.MinKey, bsontype.MaxKey: - return ejp.v.t == bsontype.Int32 - case bsontype.Undefined: - return ejp.v.t == bsontype.Boolean - case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID: - return ejp.v.t == bsontype.String - default: - return false - } -} - -func (ejp *extJSONParser) pushMode(m jsonParseMode) { - ejp.m = append(ejp.m, m) -} - -func (ejp *extJSONParser) popMode() jsonParseMode { - l := len(ejp.m) - if l == 0 { - return jpmInvalidMode - } - - m := ejp.m[l-1] - ejp.m = ejp.m[:l-1] - - return m -} - -func (ejp *extJSONParser) peekMode() jsonParseMode { - l := len(ejp.m) - if l == 0 { - return jpmInvalidMode - } - - return ejp.m[l-1] -} - -func extendJSONToken(jt *jsonToken) *extJSONValue { - var t bsontype.Type - - switch jt.t { - case jttInt32: - t = bsontype.Int32 - case jttInt64: - t = bsontype.Int64 - case jttDouble: - t = bsontype.Double - case jttString: - t = bsontype.String - case jttBool: - t = bsontype.Boolean - case jttNull: - t = bsontype.Null - default: - return nil - } - - return &extJSONValue{t: t, v: jt.v} -} - -func ensureColon(s jsonParseState, key string) error { - if s != jpsSawColon { - return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key) - } - - return nil -} - -func invalidRequestError(s string) error { - return fmt.Errorf("invalid request to read %s", s) -} - -func invalidJSONError(expected string) error { - return fmt.Errorf("invalid JSON input; expected %s", expected) -} - -func invalidJSONErrorForType(expected string, t bsontype.Type) error { - return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t) -} - -func unexpectedTokenError(jt *jsonToken) error { - switch jt.t { - case jttInt32, jttInt64, jttDouble: - return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p) - case jttString: - return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p) - case jttBool: - return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p) - case jttNull: - return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p) - case jttEOF: - return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p) - default: - return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p) - } -} - -func nestingDepthError(p, depth int) error { - return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go deleted file mode 100644 index 2aca37a91f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ /dev/null @@ -1,652 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "fmt" - "io" - "sync" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. 
-// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -type ExtJSONValueReaderPool struct { - pool sync.Pool -} - -// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { - return &ExtJSONValueReaderPool{ - pool: sync.Pool{ - New: func() interface{} { - return new(extJSONValueReader) - }, - }, - } -} - -// Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { - vr := bvrp.pool.Get().(*extJSONValueReader) - return vr.reset(r, canonical) -} - -// Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing -// is inserted into the pool and ok will be false. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { - bvr, ok := vr.(*extJSONValueReader) - if !ok { - return false - } - - bvr, _ = bvr.reset(nil, false) - bvrp.pool.Put(bvr) - return true -} - -type ejvrState struct { - mode mode - vType bsontype.Type - depth int -} - -// extJSONValueReader is for reading extended JSON. -type extJSONValueReader struct { - p *extJSONParser - - stack []ejvrState - frame int -} - -// NewExtJSONValueReader creates a new ValueReader from a given io.Reader -// It will interpret the JSON of r as canonical or relaxed according to the -// given canonical flag -func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) { - return newExtJSONValueReader(r, canonical) -} - -func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) { - ejvr := new(extJSONValueReader) - return ejvr.reset(r, canonical) -} - -func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) { - p := newExtJSONParser(r, canonical) - typ, err := p.peekType() - - if err != nil { - return nil, ErrInvalidJSON - } - - var m mode - switch typ { - case bsontype.EmbeddedDocument: - m = mTopLevel - case bsontype.Array: - m = mArray - default: - m = mValue - } - - stack := make([]ejvrState, 1, 5) - stack[0] = ejvrState{ - mode: m, - vType: typ, - } - return &extJSONValueReader{ - p: p, - stack: stack, - }, nil -} - -func (ejvr *extJSONValueReader) advanceFrame() { - if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack - length := len(ejvr.stack) - if length+1 >= cap(ejvr.stack) { - // double it - buf := make([]ejvrState, 2*cap(ejvr.stack)+1) - copy(buf, ejvr.stack) - ejvr.stack = buf - } - ejvr.stack = ejvr.stack[:length+1] - } - ejvr.frame++ - - // Clean the stack - ejvr.stack[ejvr.frame].mode = 0 - ejvr.stack[ejvr.frame].vType = 0 - ejvr.stack[ejvr.frame].depth = 0 -} - -func (ejvr *extJSONValueReader) pushDocument() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mDocument - ejvr.stack[ejvr.frame].depth = ejvr.p.depth -} - -func (ejvr *extJSONValueReader) pushCodeWithScope() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mCodeWithScope -} - -func (ejvr *extJSONValueReader) pushArray() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mArray -} - -func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = m - ejvr.stack[ejvr.frame].vType = t -} - -func 
(ejvr *extJSONValueReader) pop() { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - ejvr.frame-- - case mDocument, mArray, mCodeWithScope: - ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc... - } -} - -func (ejvr *extJSONValueReader) skipObject() { - // read entire object until depth returns to 0 (last ending } or ] seen) - depth := 1 - for depth > 0 { - ejvr.p.advanceState() - - // If object is empty, raise depth and continue. When emptyObject is true, the - // parser has already read both the opening and closing brackets of an empty - // object ("{}"), so the next valid token will be part of the parent document, - // not part of the nested document. - // - // If there is a comma, there are remaining fields, emptyObject must be set back - // to false, and comma must be skipped with advanceState(). - if ejvr.p.emptyObject { - if ejvr.p.s == jpsSawComma { - ejvr.p.emptyObject = false - ejvr.p.advanceState() - } - depth-- - continue - } - - switch ejvr.p.s { - case jpsSawBeginObject, jpsSawBeginArray: - depth++ - case jpsSawEndObject, jpsSawEndArray: - depth-- - } - } -} - -func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error { - te := TransitionError{ - name: name, - current: ejvr.stack[ejvr.frame].mode, - destination: destination, - modes: modes, - action: "read", - } - if ejvr.frame != 0 { - te.parent = ejvr.stack[ejvr.frame-1].mode - } - return te -} - -func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error { - return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t) -} - -func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - if ejvr.stack[ejvr.frame].vType != t { - return ejvr.typeError(t) - } - default: - modes := []mode{mElement, mValue} - if addModes != nil { - modes = append(modes, addModes...) 
- } - return ejvr.invalidTransitionErr(destination, callerName, modes) - } - - return nil -} - -func (ejvr *extJSONValueReader) Type() bsontype.Type { - return ejvr.stack[ejvr.frame].vType -} - -func (ejvr *extJSONValueReader) Skip() error { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - default: - return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue}) - } - - defer ejvr.pop() - - t := ejvr.stack[ejvr.frame].vType - switch t { - case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope: - // read entire array, doc or CodeWithScope - ejvr.skipObject() - default: - _, err := ejvr.p.readValue(t) - if err != nil { - return err - } - } - - return nil -} - -func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel: // allow reading array from top level - case mArray: - return ejvr, nil - default: - if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil { - return nil, err - } - } - - ejvr.pushArray() - - return ejvr, nil -} - -func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) { - if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil { - return nil, 0, err - } - - v, err := ejvr.p.readValue(bsontype.Binary) - if err != nil { - return nil, 0, err - } - - b, btype, err = v.parseBinary() - - ejvr.pop() - return b, btype, err -} - -func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) { - if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil { - return false, err - } - - v, err := ejvr.p.readValue(bsontype.Boolean) - if err != nil { - return false, err - } - - if v.t != bsontype.Boolean { - return false, fmt.Errorf("expected type bool, but got type %s", v.t) - } - - ejvr.pop() - return v.v.(bool), nil -} - -func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel: - return ejvr, nil - case mElement, mValue: - if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument { - return nil, ejvr.typeError(bsontype.EmbeddedDocument) - } - - ejvr.pushDocument() - return ejvr, nil - default: - return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue}) - } -} - -func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) { - if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil { - return "", nil, err - } - - v, err := ejvr.p.readValue(bsontype.CodeWithScope) - if err != nil { - return "", nil, err - } - - code, err = v.parseJavascript() - - ejvr.pushCodeWithScope() - return code, ejvr, err -} - -func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) { - if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil { - return "", primitive.NilObjectID, err - } - - v, err := ejvr.p.readValue(bsontype.DBPointer) - if err != nil { - return "", primitive.NilObjectID, err - } - - ns, oid, err = v.parseDBPointer() - - ejvr.pop() - return ns, oid, err -} - -func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) { - if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.DateTime) - if err != nil { - return 0, err - } - - d, err := v.parseDateTime() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) 
-
-func (ejvr *extJSONValueReader) ReadDecimal128() (primitive.Decimal128, error) {
-	if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil {
-		return primitive.Decimal128{}, err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Decimal128)
-	if err != nil {
-		return primitive.Decimal128{}, err
-	}
-
-	d, err := v.parseDecimal128()
-
-	ejvr.pop()
-	return d, err
-}
-
-func (ejvr *extJSONValueReader) ReadDouble() (float64, error) {
-	if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil {
-		return 0, err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Double)
-	if err != nil {
-		return 0, err
-	}
-
-	d, err := v.parseDouble()
-
-	ejvr.pop()
-	return d, err
-}
-
-func (ejvr *extJSONValueReader) ReadInt32() (int32, error) {
-	if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil {
-		return 0, err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Int32)
-	if err != nil {
-		return 0, err
-	}
-
-	i, err := v.parseInt32()
-
-	ejvr.pop()
-	return i, err
-}
-
-func (ejvr *extJSONValueReader) ReadInt64() (int64, error) {
-	if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil {
-		return 0, err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Int64)
-	if err != nil {
-		return 0, err
-	}
-
-	i, err := v.parseInt64()
-
-	ejvr.pop()
-	return i, err
-}
-
-func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) {
-	if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil {
-		return "", err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.JavaScript)
-	if err != nil {
-		return "", err
-	}
-
-	code, err = v.parseJavascript()
-
-	ejvr.pop()
-	return code, err
-}
-
-func (ejvr *extJSONValueReader) ReadMaxKey() error {
-	if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil {
-		return err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.MaxKey)
-	if err != nil {
-		return err
-	}
-
-	err = v.parseMinMaxKey("max")
-
-	ejvr.pop()
-	return err
-}
-
-func (ejvr *extJSONValueReader) ReadMinKey() error {
-	if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil {
-		return err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.MinKey)
-	if err != nil {
-		return err
-	}
-
-	err = v.parseMinMaxKey("min")
-
-	ejvr.pop()
-	return err
-}
-
-func (ejvr *extJSONValueReader) ReadNull() error {
-	if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil {
-		return err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Null)
-	if err != nil {
-		return err
-	}
-
-	if v.t != bsontype.Null {
-		return fmt.Errorf("expected type null but got type %s", v.t)
-	}
-
-	ejvr.pop()
-	return nil
-}
-
-func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) {
-	if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil {
-		return primitive.ObjectID{}, err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.ObjectID)
-	if err != nil {
-		return primitive.ObjectID{}, err
-	}
-
-	oid, err := v.parseObjectID()
-
-	ejvr.pop()
-	return oid, err
-}
-
-func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) {
-	if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil {
-		return "", "", err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Regex)
-	if err != nil {
-		return "", "", err
-	}
-
-	pattern, options, err = v.parseRegex()
-
-	ejvr.pop()
-	return pattern, options, err
-}
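
The readers can also be driven by hand through the package's exported constructor, NewExtJSONValueReader, which makes ensureElementValue's type check visible. A sketch assuming the made-up document literal below (canonical extended JSON):

package main

import (
	"fmt"
	"strings"

	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	vr, err := bsonrw.NewExtJSONValueReader(strings.NewReader(`{"n": {"$numberInt": "42"}}`), true)
	if err != nil {
		panic(err)
	}
	dr, err := vr.ReadDocument()
	if err != nil {
		panic(err)
	}
	name, evr, err := dr.ReadElement()
	if err != nil {
		panic(err)
	}
	// ReadInt32 succeeds because the element really is an Int32; calling
	// ReadString here instead would surface the error built by typeError.
	n, err := evr.ReadInt32()
	if err != nil {
		panic(err)
	}
	fmt.Println(name, n) // n 42
}
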
return "", err - } - - v, err := ejvr.p.readValue(bsontype.String) - if err != nil { - return "", err - } - - if v.t != bsontype.String { - return "", fmt.Errorf("expected type string but got type %s", v.t) - } - - ejvr.pop() - return v.v.(string), nil -} - -func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) { - if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil { - return "", err - } - - v, err := ejvr.p.readValue(bsontype.Symbol) - if err != nil { - return "", err - } - - symbol, err = v.parseSymbol() - - ejvr.pop() - return symbol, err -} - -func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) { - if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil { - return 0, 0, err - } - - v, err := ejvr.p.readValue(bsontype.Timestamp) - if err != nil { - return 0, 0, err - } - - t, i, err = v.parseTimestamp() - - ejvr.pop() - return t, i, err -} - -func (ejvr *extJSONValueReader) ReadUndefined() error { - if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.Undefined) - if err != nil { - return err - } - - err = v.parseUndefined() - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel, mDocument, mCodeWithScope: - default: - return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope}) - } - - name, t, err := ejvr.p.readKey() - - if err != nil { - if err == ErrEOD { - if ejvr.stack[ejvr.frame].mode == mCodeWithScope { - _, err := ejvr.p.peekType() - if err != nil { - return "", nil, err - } - } - - ejvr.pop() - } - - return "", nil, err - } - - ejvr.push(mElement, t) - return name, ejvr, nil -} - -func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mArray: - default: - return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray}) - } - - t, err := ejvr.p.peekType() - if err != nil { - if err == ErrEOA { - ejvr.pop() - } - - return nil, err - } - - ejvr.push(mValue, t) - return ejvr, nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go deleted file mode 100644 index ba39c9601f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on github.com/golang/go by The Go Authors -// See THIRD-PARTY-NOTICES for original license terms. - -package bsonrw - -import "unicode/utf8" - -// safeSet holds the value true if the ASCII character with the given array -// position can be represented inside a JSON string without any further -// escaping. -// -// All values are true except for the ASCII control characters (0-31), the -// double quote ("), and the backslash character ("\"). 
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
deleted file mode 100644
index ba39c9601f..0000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-//
-// Based on github.com/golang/go by The Go Authors
-// See THIRD-PARTY-NOTICES for original license terms.
-
-package bsonrw
-
-import "unicode/utf8"
-
-// safeSet holds the value true if the ASCII character with the given array
-// position can be represented inside a JSON string without any further
-// escaping.
-//
-// All values are true except for the ASCII control characters (0-31), the
-// double quote ("), and the backslash character ("\").
-var safeSet = [utf8.RuneSelf]bool{
-	' ':      true,
-	'!':      true,
-	'"':      false,
-	'#':      true,
-	'$':      true,
-	'%':      true,
-	'&':      true,
-	'\'':     true,
-	'(':      true,
-	')':      true,
-	'*':      true,
-	'+':      true,
-	',':      true,
-	'-':      true,
-	'.':      true,
-	'/':      true,
-	'0':      true,
-	'1':      true,
-	'2':      true,
-	'3':      true,
-	'4':      true,
-	'5':      true,
-	'6':      true,
-	'7':      true,
-	'8':      true,
-	'9':      true,
-	':':      true,
-	';':      true,
-	'<':      true,
-	'=':      true,
-	'>':      true,
-	'?':      true,
-	'@':      true,
-	'A':      true,
-	'B':      true,
-	'C':      true,
-	'D':      true,
-	'E':      true,
-	'F':      true,
-	'G':      true,
-	'H':      true,
-	'I':      true,
-	'J':      true,
-	'K':      true,
-	'L':      true,
-	'M':      true,
-	'N':      true,
-	'O':      true,
-	'P':      true,
-	'Q':      true,
-	'R':      true,
-	'S':      true,
-	'T':      true,
-	'U':      true,
-	'V':      true,
-	'W':      true,
-	'X':      true,
-	'Y':      true,
-	'Z':      true,
-	'[':      true,
-	'\\':     false,
-	']':      true,
-	'^':      true,
-	'_':      true,
-	'`':      true,
-	'a':      true,
-	'b':      true,
-	'c':      true,
-	'd':      true,
-	'e':      true,
-	'f':      true,
-	'g':      true,
-	'h':      true,
-	'i':      true,
-	'j':      true,
-	'k':      true,
-	'l':      true,
-	'm':      true,
-	'n':      true,
-	'o':      true,
-	'p':      true,
-	'q':      true,
-	'r':      true,
-	's':      true,
-	't':      true,
-	'u':      true,
-	'v':      true,
-	'w':      true,
-	'x':      true,
-	'y':      true,
-	'z':      true,
-	'{':      true,
-	'|':      true,
-	'}':      true,
-	'~':      true,
-	'\u007f': true,
-}
-
-// htmlSafeSet holds the value true if the ASCII character with the given
-// array position can be safely represented inside a JSON string, embedded
-// inside of HTML